Merge "bluetooth: Check for hcon during during sock_close" into msm-3.4
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
new file mode 100644
index 0000000..2ba7341
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -0,0 +1,98 @@
+Qualcomm's QPNP PMIC Voltage ADC Arbiter
+
+The QPNP PMIC Voltage ADC (VADC) provides an interface for clients to read
+voltage. A 15-bit ADC is used for voltage measurements. The VADC has multiple
+peripherals, and the scope of this driver is to provide an interface for the
+USR peripheral of the VADC.
+
+VADC node
+
+Required properties:
+- compatible : should be "qcom,qpnp-vadc" for the Voltage ADC driver.
+- reg : offset and length of the PMIC Arbiter register map.
+- interrupts : The USR bank peripheral VADC interrupt.
+- qcom,adc-bit-resolution : Bit resolution of the ADC.
+- qcom,adc-vdd-reference : Voltage reference used by the ADC.
+
+Channel nodes
+NOTE: At least one channel node is required.
+
+Required properties:
+- label : Channel name used for sysfs entry.
+- qcom,channel-num : Channel number associated with the AMUX input.
+- qcom,decimation : Sampling rate to use for the individual channel measurement.
+		    Select from the following unsigned int values.
+		    0 : 512
+		    1 : 1K
+		    2 : 2K
+		    3 : 4K
+- qcom,pre-div-channel-scaling : Pre-div applied to the channel before the signal
+				 is measured.
+- qcom,calibration-type : Reference voltage to use for channel calibration.
+			  Channel calibration is dependent on the channel.
+			  Certain channels like XO_THERM and BATT_THERM use ratiometric
+			  calibration. Most other channels fall under absolute calibration.
+			  Select from the following strings.
+			  "absolute" : Uses the 625mV and 1.25V reference channels.
+			  "ratiometric" : Uses the reference voltage/GND for calibration.
+- qcom,scale-function : Scaling function used to convert raw ADC code to units specific to
+			a given channel.
+			Select from the following unsigned int values.
+			0 : Default scaling to convert raw ADC code to voltage.
+			1 : Conversion to temperature based on BTM parameters.
+			2 : Returns result in millidegrees Centigrade.
+			3 : Returns current across a 0.1 ohm resistor.
+			4 : Returns XO thermistor voltage in degrees Centigrade.
+- qcom,hw-settle-time : Settling period for the channel before ADC read.
+			Select from the following unsigned int values.
+			0 : 0us
+			1 : 100us
+			2 : 200us
+			3 : 300us
+			4 : 400us
+			5 : 500us
+			6 : 600us
+			7 : 700us
+			8 : 800us
+			9 : 900us
+			0xa : 1ms
+			0xb : 2ms
+			0xc : 4ms
+			0xd : 6ms
+			0xe : 8ms
+			0xf : 10ms
+- qcom,fast-avg-setup : Number of samples to average for a measurement. Fast averaging
+			provides the option to obtain a single measurement from the ADC that
+			is an average of multiple samples. The number of samples averaged
+			is 2^(value). Select from the following unsigned int values.
+			0 : 1
+			1 : 2
+			2 : 4
+			3 : 8
+			4 : 16
+			5 : 32
+			6 : 64
+			7 : 128
+			8 : 256
+
+Example:
+	/* Main Node */
+	qcom,vadc@3100 {
+		compatible = "qcom,qpnp-vadc";
+		reg = <0x3100 0x100>;
+		interrupts = <0x0 0x31 0x0>;
+		qcom,adc-bit-resolution = <15>;
+		qcom,adc-vdd-reference = <1800>;
+
+		/* Channel Node */
+		chan@0 {
+			label = "usb_in";
+			qcom,channel-num = <0>;
+			qcom,decimation = <0>;
+			qcom,pre-div-channel-scaling = <20>;
+			qcom,calibration-type = "absolute";
+			qcom,scale-function = <0>;
+			qcom,hw-settle-time = <0>;
+			qcom,fast-avg-setup = <0>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/iommu/msm_iommu.txt b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
index c198fe9..f5a2590 100644
--- a/Documentation/devicetree/bindings/iommu/msm_iommu.txt
+++ b/Documentation/devicetree/bindings/iommu/msm_iommu.txt
@@ -12,10 +12,14 @@
   - interrupts : should contain the context bank interrupt.
   - qcom,iommu-ctx-sids : List of stream identifiers associated with this
     translation context.
-  - qcom,iommu-ctx-name : Name of the context bank
+  - label : Name of the context bank
   - qcom,iommu-smt-size : Number of SMR entries in the SMT of this HW block
   - vdd-supply : vdd-supply: phandle to GDSC regulator controlling this IOMMU.
 
+Optional properties:
+- qcom,needs-alt-core-clk : boolean to enable the secondary core clock for
+  access to the IOMMU configuration registers
+
 Example:
 
         qcom,iommu@fda64000 {
@@ -27,12 +31,12 @@
                         reg = <0xfda6c000 0x1000>;
                         interrupts = <0 70 0>;
                         qcom,iommu-ctx-sids = <0 2>;
-			qcom,iommu-ctx-name = "ctx_0";
+			label = "ctx_0";
                 };
                 qcom,iommu-ctx@fda6d000 {
                         reg = <0xfda6d000 0x1000>;
                         interrupts = <0 71 0>;
                         qcom,iommu-ctx-sids = <1>;
-			qcom,iommu-ctx-name = "ctx_1";
+			label = "ctx_1";
                 };
         };
diff --git a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
index a8de90f..c674a13 100644
--- a/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
+++ b/Documentation/devicetree/bindings/mmc/msm_sdcc.txt
@@ -16,10 +16,8 @@
 Optional Properties:
 	- cell-index - defines slot ID.
 	- qcom,sdcc-bus-width - defines the bus I/O width that controller supports.
-	- qcom,sdcc-wp-gpio - defines write protect switch gpio.
-	- qcom,sdcc-wp-polarity - specifies the polarity of wp switch.
-	- qcom,sdcc-cd-gpio - defines card detect gpio number.
-	- qcom,sdcc-cd-polarity - specifies the polarity of cd gpio.
+	- wp-gpios - specify GPIO for write protect switch detection.
+	- cd-gpios - specify GPIO for card detection.
 	- qcom,sdcc-nonremovable - specifies whether the card in slot is
 				hot pluggable or hard wired.
 	- qcom,sdcc-disable_cmd23 - disable sending CMD23 to card when controller can't support it.
diff --git a/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
new file mode 100644
index 0000000..bddbbae
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
@@ -0,0 +1,51 @@
+Qualcomm Parallel Interface Controller (QPIC) for NAND devices
+
+Required properties:
+- compatible : "qcom,msm-nand".
+- reg : should specify QPIC NANDc and BAM physical address range.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should specify QPIC/BAM interrupt numbers.
+- interrupt-names : should specify relevant names to each interrupts property
+  defined.
+
+MTD flash partition layout for NAND devices -
+
+Each partition is represented as a sub-node of the qcom,mtd-partitions device.
+Each node's name represents the name of the corresponding partition.
+
+Required properties:
+- reg : The partition offset and size
+- label : The label / name for this partition.
+
+Optional properties:
+- read-only: This parameter, if present, indicates that this partition
+  should only be mounted read-only.
+
+Examples:
+
+	qcom,nand@f9af0000 {
+		compatible = "qcom,msm-nand";
+		reg = <0xf9af0000 0x1000>,
+		      <0xf9ac4000 0x8000>;
+		reg-names = "nand_phys",
+			    "bam_phys";
+		interrupts = <0 279 0>;
+		interrupt-names = "bam_irq";
+	};
+
+	qcom,mtd-partitions {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		partition@0 {
+			label = "boot";
+			reg = <0x00000000 0x1000>;
+		};
+		partition@00020000 {
+			label = "userdata";
+			reg = <0x00020000 0x1000>;
+		};
+		partition@00040000 {
+			label = "system";
+			reg = <0x00040000 0x1000>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 16a5c77..84f0c24 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -1,4 +1,4 @@
-Qualcomm audio devices for ALSA sound soc
+Qualcomm audio devices for ALSA sound SoC
 
 * msm-pcm
 
@@ -42,6 +42,22 @@
 
  - compatible : "qcom,msm-dai-fe"
 
+* msm-dai-q6
+
+[First Level Nodes]
+
+Required properties:
+
+ - compatible : "msm-dai-q6"
+
+[Second Level Nodes]
+
+Required properties:
+
+ - compatible : "qcom,msm-dai-q6-dev"
+ - qcom,msm-dai-q6-dev-id : The SLIMbus multi-channel port ID.
+                            Valid values are 16384 to 16393.
+
 * msm-auxpcm
 
 [First Level Nodes]
@@ -87,6 +103,22 @@
 
  - compatible : "qcom,msm-pcm-hostless"
 
+* msm-ocmem-audio
+
+Required properties:
+
+ - compatible :                            "qcom,msm-ocmem-audio"
+
+ - qcom,msm-ocmem-audio-src-id:            Master port id
+
+ - qcom,msm-ocmem-audio-dst-id:            Slave port id
+
+ - qcom,msm-ocmem-audio-ab:                arbitrated bandwidth
+                                           in Bytes/s
+
+ - qcom,msm-ocmem-audio-ib:                instantaneous bandwidth
+                                           in Bytes/s
+
 Example:
 
         qcom,msm-pcm {
@@ -117,6 +149,19 @@
                 compatible = "qcom,msm-dai-fe";
         };
 
+	qcom,msm-dai-q6 {
+		compatible = "qcom,msm-dai-q6";
+		qcom,msm-dai-q6-sb-0-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16384>;
+		};
+
+		qcom,msm-dai-q6-sb-0-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16385>;
+		};
+	};
+
         qcom,msm-auxpcm {
                 compatible = "qcom,msm-auxpcm-resource";
                 qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
@@ -143,3 +188,11 @@
                 compatible = "qcom,msm-pcm-hostless";
         };
 
+	qcom,msm-ocmem-audio {
+		compatible = "qcom,msm-ocmem-audio";
+		qcom,msm-ocmem-audio-src-id = <11>;
+		qcom,msm-ocmem-audio-dst-id = <604>;
+		qcom,msm-ocmem-audio-ab = <209715200>;
+		qcom,msm-ocmem-audio-ib = <471859200>;
+	};
+
diff --git a/Documentation/mtd/devices/msm_qpic_nand.txt b/Documentation/mtd/devices/msm_qpic_nand.txt
new file mode 100644
index 0000000..301e823
--- /dev/null
+++ b/Documentation/mtd/devices/msm_qpic_nand.txt
@@ -0,0 +1,296 @@
+Introduction
+============
+
+In MDM9x25, a new NAND controller (NANDc) has been added, with the
+following major changes compared to its previous version -
+
+1. It includes a secured BAM-Lite, and the support for ADM (Application Data
+Mover) has been removed.
+
+2. It includes 4-bit BCH ECC, and the support for 4-bit Reed-Solomon ECC has
+been removed.
+
+3. The support for Dual NAND controllers has been removed and thus the
+software features like ping-pong mode and interleave mode are deprecated.
+
+4. It includes support for dual read buffers and one dedicated write buffer
+for each processor (Modem and Apps).
+
+This new NAND driver takes care of all the above hardware changes. In
+addition to the hardware changes, it also takes care of the software device
+tree changes.
+
+Hardware description
+====================
+
+The NANDc Core:
+---------------
+Qualcomm Parallel Interface Controller (QPIC), formerly named EBI2, is a
+wrapper module which integrates a NAND controller core and an LCD controller
+core and multiplexes their access to shared parallel interface pins. Both
+controller cores are accessible to both processors (Modem and Apps), and share
+master access to the Peripheral NoC (Network on Chip) via a BAM module.
+
+In MDM9x25, QPIC is located on the peripheral NoC, connected via a 32-bit AHB
+Master Port and a 32-bit AHB Slave Port. The NANDc register interface goes
+through the AHB Slave Port and data transfers using BAM go through the AHB
+Master Port. The NAND Controller (NANDc) is a hardware core which manages the
+access to an off-chip NAND device.
+
+BAM-Lite:
+---------
+BAM (Bus Access Manager) can transfer data between a peripheral and memory,
+or between two peripherals in a BAM to BAM mode. Each BAM contains multiple
+DMA channels, called pipes. A pipe provides a unidirectional data transfer
+engine, capable of either receiving data in consumer mode, or transmitting
+data in producer mode. The consumer fetches the data from the source system
+memory, and the producer writes data to the destination system memory.
+
+BAM-Lite's interface is similar to the BAM interface with slight changes to
+the sideband interface. BAM-Lite is an area-optimized version of BAM. BAM-Lite
+supports new features such as Notify-When-Done (NWD), pipe lock/unlock and
+command descriptors.
+
+NANDc has a secured BAM-Lite which provides DMA support for the NANDc and
+command support for accessing the NANDc registers. It is called secured
+because it has an integrated APU (Address Protection Unit) that validates
+every access to BAM and its peripheral registers.
+
+The NANDc has in total 6 BAM pipes - 3 pipes are dedicated for each processor
+(Modem and Apps) at the hardware level.
+
+Software description
+====================
+
+The NAND device is shared between two independent file systems, each running
+on a different processor - the application processor (Apps) and the Modem.
+The NAND driver uses the BAM driver to transfer NAND operation requests and
+data to/from the NAND Controller (NANDc) core through the BAM pipes. Every
+NANDc register read/write access must go through BAM as it facilitates security
+mechanisms to enable simultaneous access to the NAND device from both
+processors (Modem and Apps).
+
+The Apps NAND driver registers the NANDc BAM peripheral with the BAM driver,
+allocates endpoints and descriptor FIFO memory, and registers for completion
+event notification for the following pipes:
+
+	- system consumer pipe for data (pipe#0) : This BAM pipe will be used
+	  for transferring data from system memory to NANDc i.e., during write.
+
+	- system producer pipe for data (pipe#1) : This BAM pipe will be used
+	  for transferring data from NANDc to system memory i.e., during read.
+
+	- system consumer pipe for commands (pipe#2) : This BAM pipe will be
+	  used for both reading and writing NANDc registers. It can be
+	  configured either as a consumer pipe or a producer pipe, but as per
+	  the HW team's recommendation it is configured as a consumer pipe.
+
+Control path:
+-------------
+Each NAND operation can be described as a set of BAM command and/or data
+descriptors.
+
+A command descriptor (CD) points to the starting address of a command
+block. Each command block may contain a set of command elements where
+each command element is a single NANDc register read/write. The NAND
+driver submits all command descriptors to its system consumer pipe#2.
+
+Data path:
+----------
+A data descriptor (DD) points to the start of a data block, which is a sequential
+chunk of data.
+
+For page write operations, the NAND driver submits data descriptors to system
+consumer pipe#0 and as per the descriptors submitted, the BAM reads data from
+the data block into the NANDc buffer.
+
+For page read operations, the NAND driver submits data descriptors to system
+producer pipe#1 and as per the descriptors submitted, the BAM reads data from
+the NANDc buffer into the data block.
+
+The driver submits a CD/DD using the BAM driver APIs sps_transfer_one()/
+sps_transfer(). A flags argument is passed to these APIs; if SPS_IOVEC_FLAG_CMD
+is set, the descriptor is identified as a CD, otherwise as a DD (see the
+sketch after this list). The other valid SPS flags for a CD/DD are -
+
+	- SPS_IOVEC_FLAG_INT : This flag instructs the BAM driver to raise a BAM
+	  interrupt after the descriptor carrying this flag has been processed
+	  by the BAM HW. This flag is applicable to both a CD and a DD.
+
+	- SPS_IOVEC_FLAG_NWD : This flag instructs the BAM HW not to process
+	  subsequent descriptors until it receives an acknowledgement from the
+	  NANDc that the descriptor carrying this flag has been completely
+	  executed. This flag is applicable only to a CD.
+
+	- SPS_IOVEC_FLAG_LOCK: This flag marks the beginning of a series of
+	  commands and it indicates that all the CDs submitted on this pipe
+	  must be executed atomically without any interruption by commands
+	  from other pipes. This is applicable only for a CD.
+
+	- SPS_IOVEC_FLAG_UNLOCK: This flag marks the end of a series of
+	  commands and it indicates that the other pipe that was locked due to
+	  SPS_IOVEC_FLAG_LOCK flag can be unblocked after the current CD
+	  with this flag is executed. This is applicable only for a CD.
+
+	- SPS_IOVEC_FLAG_EOT : This flag indicates to the BAM driver that the
+	  descriptor carrying this flag is the last descriptor submitted
+	  during a write operation. This is applicable only to a DD.
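+
+Below is a minimal, illustrative sketch (not the actual driver code) of how a
+CD and the final DD of a page write could be queued using these flags. It
+assumes the msm-3.4 SPS API, where sps_transfer_one() takes the pipe handle,
+the physical address of the buffer, its size, a user cookie and the flags; the
+header path, pipe handles and buffer names below are assumptions made for
+illustration only:
+
+	#include <mach/sps.h>	/* assumed header providing sps_transfer_one()
+				 * and the SPS_IOVEC_FLAG_* definitions */
+
+	/* Queue one command block on the system consumer command pipe#2. */
+	static int queue_cmd_block(struct sps_pipe *cmd_pipe,
+				   u32 cmd_block_phys, u32 cmd_block_len)
+	{
+		/*
+		 * CMD marks this as a command descriptor, NWD makes the BAM
+		 * wait for the NANDc acknowledgement, and INT raises a BAM
+		 * interrupt once this descriptor has been processed.
+		 */
+		return sps_transfer_one(cmd_pipe, cmd_block_phys, cmd_block_len,
+					NULL, SPS_IOVEC_FLAG_CMD |
+					      SPS_IOVEC_FLAG_NWD |
+					      SPS_IOVEC_FLAG_INT);
+	}
+
+	/* Queue the last data block of a page write on the system consumer
+	 * data pipe#0; EOT marks the end of the write transfer. */
+	static int queue_last_write_data(struct sps_pipe *data_pipe,
+					 u32 data_phys, u32 data_len)
+	{
+		return sps_transfer_one(data_pipe, data_phys, data_len,
+					NULL, SPS_IOVEC_FLAG_EOT |
+					      SPS_IOVEC_FLAG_INT);
+	}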
+
+Error handling:
+---------------
+After a page read/write completion notification from BAM, the NAND driver
+validates the values read from the NANDc registers to confirm the
+success/failure of the page read/write operation. For example, after a page
+read/write is complete, the driver reads the NANDc status registers to check
+for operational errors, protection violation errors, device status errors, and
+the number of correctable/uncorrectable errors reported by the controller.
+Based on the error conditions that are met, the driver reports appropriate
+error codes to the upper layers, which respond and take appropriate action.
+
+Design
+======
+
+The existing NAND driver (ADM based) cannot be reused due to the many major HW
+changes (see the Introduction section) in the new NANDc core. Some complex
+features (such as dual NAND controller support) are also deprecated in the new
+NANDc. Hence, a new NAND driver is written to take care of both the SPS/BAM
+changes and other controller-specific changes. The rest of the interaction with
+MTD and YAFFS2 remains the same as in the previous NAND driver, msm_nand.c.
+
+Power Management
+================
+
+Two clocks are supplied by the system's clock controller to the NANDc - the AHB
+clock and the interface clock. The interface clock drives some of the HW blocks
+within the NANDc. As of now, both of these clocks are always on. However, the
+NANDc provides clock gating if some of the QPIC clock control registers are
+configured. Clock gating is yet to be enabled by the driver.
+
+SMP/Multi-Core
+==============
+
+The locking mechanism for page read/write operations is taken care of by the
+higher layers such as MTD/YAFFS2, and only a single page operation can happen
+at any time on a given partition. For a single page operation, there is always
+only one context associated within the driver and thus no additional handling
+is required within the driver. However, the file system can issue a request on
+one partition and, at the same time, issue another request on another
+partition, since each partition corresponds to a different MTD block device.
+This situation is handled within the driver by acquiring a mutex lock before
+submitting any command/data descriptors to any of the BAM pipes.
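+
+A minimal sketch of this locking pattern, assuming a hypothetical driver
+context structure (the structure and function names below are illustrative,
+not the actual driver symbols):
+
+	#include <linux/mutex.h>
+
+	struct msm_nand_info {
+		struct mutex lock;	/* serializes all BAM pipe submissions */
+		/* ... pipe handles, buffers, register mappings ... */
+	};
+
+	static int msm_nand_do_page_op(struct msm_nand_info *info)
+	{
+		int err;
+
+		mutex_lock(&info->lock);
+		/* Build and submit command/data descriptors to the BAM pipes,
+		 * then wait for their completion notifications. */
+		err = 0;
+		mutex_unlock(&info->lock);
+
+		return err;
+	}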
+
+
+Security
+========
+
+The same NAND device is accessible from both processors (Modem and Apps), so
+to avoid any configuration overwrite issues during a page operation, the
+driver on each processor (Modem and Apps) must explicitly use the BAM pipe
+lock/unlock mechanism. This is taken care of by the NAND driver. Partition
+violation issues are prevented by an MPU (Memory Protection Unit) that is
+attached to the NANDc.
+
+Performance
+===========
+
+None.
+
+Interface
+=========
+
+The NAND driver registers each partition on the NAND device as an MTD block
+device using mtd_device_register(). As part of this registration, the following
+ops (struct mtd_info *mtd) are registered with the MTD layer for each partition:
+
+mtd->_block_isbad = msm_nand_block_isbad;
+mtd->_block_markbad = msm_nand_block_markbad;
+mtd->_read = msm_nand_read;
+mtd->_write = msm_nand_write;
+mtd->_read_oob  = msm_nand_read_oob;
+mtd->_write_oob = msm_nand_write_oob;
+mtd->_erase = msm_nand_erase;
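+
+A condensed sketch of how these ops and the partitions parsed from the device
+tree could be wired up and registered, assuming the op implementations listed
+above are in scope (the helper name and parameters below are illustrative, not
+the actual driver symbols):
+
+	#include <linux/mtd/mtd.h>
+	#include <linux/mtd/partitions.h>
+
+	static int msm_nand_setup_mtd(struct mtd_info *mtd,
+				      const struct mtd_partition *parts,
+				      int nr_parts)
+	{
+		/* Hook the driver's implementations into the MTD interface. */
+		mtd->_block_isbad   = msm_nand_block_isbad;
+		mtd->_block_markbad = msm_nand_block_markbad;
+		mtd->_read          = msm_nand_read;
+		mtd->_write         = msm_nand_write;
+		mtd->_read_oob      = msm_nand_read_oob;
+		mtd->_write_oob     = msm_nand_write_oob;
+		mtd->_erase         = msm_nand_erase;
+
+		/* Each registered partition shows up as an MTD block device. */
+		return mtd_device_register(mtd, parts, nr_parts);
+	}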
+
+msm_nand_block_isbad() - This checks whether a block is bad by reading the bad
+block byte in the first page of the block. A block is considered bad if the bad
+block byte location contains any value other than 0xFF.
+
+msm_nand_block_markbad() - This marks a block as bad by writing 0 to the
+entire first page of the block, and thus to the bad block byte location.
+
+msm_nand_read/write() - This is used to read/write only main data from/to
+single/multiple pages within the NAND device. The YAFFS2 file system can send
+read/write requests for two types of data -
+
+	- Main data : This is the actual data to be read/written from/to a
+	  page during a read/write operation on this device. The size of this
+	  data request is typically based on the page size of the device
+	  (2K/4K).
+
+	- OOB (Out Of Band) data : This is the spare data that will be used by
+	  the file system to keep track of its metadata/tags associated with the
+	  actual data. As of now, the file system needs only 16 bytes to
+	  accommodate this data. The NAND driver always writes this data
+	  towards the end of the main data.
+
+It is up to the file system whether or not to send a read/write request for OOB
+data along with main data.
+
+msm_nand_read_oob()/write_oob() - This is used to read/write both main data
+and spare data from/to single/multiple pages within the NAND device.
+
+msm_nand_erase() - This erases the complete block by sending an erase command
+to the device.
+
+The YAFFS2 file system registers as a user of the MTD device and uses the ops
+exposed by the NAND driver to perform read/write/erase operations on the NAND
+device. As of now, the driver works only with the YAFFS2 file system. An
+attempt to use it with any other file system might demand additional changes
+in the driver.
+
+Driver parameters
+=================
+
+None.
+
+Config options
+==============
+
+The config option MTD_MSM_QPIC_NAND enables this driver.
+
+Dependencies
+============
+
+It depends on the following kernel components:
+
+- SPS/BAM driver
+- MTD core layer
+- The necessary NANDc and BAM resources being added to the .dts file
+
+It depends on the following non-kernel components:
+
+The partition information of the NAND device must be passed by the Modem
+subsystem to the Apps boot loader, and the Apps boot loader must update the
+.dts file with the partition information as per the defined MTD bindings.
+
+The detailed information on MTD bindings can be found at -
+Documentation/devicetree/bindings/mtd/msm_qpic_nand.txt
+
+User space utilities
+====================
+
+None.
+
+Other
+=====
+
+No changes other than device tree changes are anticipated.
+
+Known issues
+============
+
+None.
+
+To do
+=====
+
+The NANDc core supports clock gating, which is not yet supported by the driver.
diff --git a/arch/arm/boot/dts/msm-iommu.dtsi b/arch/arm/boot/dts/msm-iommu.dtsi
new file mode 100755
index 0000000..0e2ddce9
--- /dev/null
+++ b/arch/arm/boot/dts/msm-iommu.dtsi
@@ -0,0 +1,161 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/ {
+	jpeg_iommu: qcom,iommu@fda64000 {
+		compatible = "qcom,msm-smmu-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		reg = <0xfda64000 0x10000>;
+		vdd-supply = <&gdsc_jpeg>;
+		qcom,iommu-smt-size = <16>;
+		status = "disabled";
+
+		qcom,iommu-ctx@fda6c000 {
+			reg = <0xfda6c000 0x1000>;
+			interrupts = <0 69 0>;
+			qcom,iommu-ctx-sids = <0>;
+			label = "jpeg_enc0";
+		};
+
+		qcom,iommu-ctx@fda6d000 {
+			reg = <0xfda6d000 0x1000>;
+			interrupts = <0 70 0>;
+			qcom,iommu-ctx-sids = <1>;
+			label = "jpeg_enc1";
+		};
+
+		qcom,iommu-ctx@fda6e000 {
+			reg = <0xfda6e000 0x1000>;
+			interrupts = <0 71 0>;
+			qcom,iommu-ctx-sids = <2>;
+			label = "jpeg_dec";
+		};
+	};
+
+	mdp_iommu: qcom,iommu@fd928000 {
+		compatible = "qcom,msm-smmu-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		reg = <0xfd928000 0x10000>;
+		vdd-supply = <&gdsc_mdss>;
+		qcom,iommu-smt-size = <16>;
+		status = "disabled";
+
+		qcom,iommu-ctx@fd930000 {
+			reg = <0xfd930000 0x1000>;
+			interrupts = <0 46 0>;
+			qcom,iommu-ctx-sids = <0>;
+			label = "mdp_0";
+		};
+
+		qcom,iommu-ctx@fd931000 {
+			reg = <0xfd931000 0x1000>;
+			interrupts = <0 47 0>;
+			qcom,iommu-ctx-sids = <1>;
+			label = "mdp_1";
+		};
+	};
+
+	venus_iommu: qcom,iommu@fdc84000 {
+		compatible = "qcom,msm-smmu-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		reg = <0xfdc84000 0x10000>;
+		vdd-supply = <&gdsc_venus>;
+		qcom,iommu-smt-size = <16>;
+		qcom,needs-alt-core-clk;
+		status = "disabled";
+
+		qcom,iommu-ctx@fdc8c000 {
+			reg = <0xfdc8c000 0x1000>;
+			interrupts = <0 43 0>;
+			qcom,iommu-ctx-sids = <0 1 2 3 4 5>;
+			label = "venus_ns";
+		};
+
+		qcom,iommu-ctx@fdc8d000 {
+			reg = <0xfdc8d000 0x1000>;
+			interrupts = <0 42 0>;
+			qcom,iommu-ctx-sids = <0x80 0x81 0x82 0x83 0x84 0x85>;
+			label = "venus_cp";
+		};
+
+		qcom,iommu-ctx@fdc8e000 {
+			reg = <0xfdc8e000 0x1000>;
+			interrupts = <0 41 0>;
+			qcom,iommu-ctx-sids = <0xc0 0xc6>;
+			label = "venus_fw";
+		};
+	};
+
+	kgsl_iommu: qcom,iommu@fdb10000 {
+		compatible = "qcom,msm-smmu-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		reg = <0xfdb10000 0x10000>;
+		vdd-supply = <&gdsc_oxili_cx>;
+		qcom,iommu-smt-size = <32>;
+		qcom,needs-alt-core-clk;
+		status = "disabled";
+
+		qcom,iommu-ctx@fdb18000 {
+			reg = <0xfdb18000 0x1000>;
+			interrupts = <0 240 0>;
+			qcom,iommu-ctx-sids = <0>;
+			label = "gfx3d_user";
+		};
+
+		qcom,iommu-ctx@fdb19000 {
+			reg = <0xfdb19000 0x1000>;
+			interrupts = <0 241 0>;
+			qcom,iommu-ctx-sids = <1>;
+			label = "gfx3d_priv";
+		};
+	};
+
+	vfe_iommu: qcom,iommu@fda44000 {
+		compatible = "qcom,msm-smmu-v2";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		reg = <0xfda44000 0x10000>;
+		vdd-supply = <&gdsc_vfe>;
+		qcom,iommu-smt-size = <32>;
+		status = "disabled";
+
+		qcom,iommu-ctx@fda4c000 {
+			reg = <0xfda4c000 0x1000>;
+			interrupts = <0 64 0>;
+			qcom,iommu-ctx-sids = <0>;
+			label = "vfe0";
+		};
+
+		qcom,iommu-ctx@fda4d000 {
+			reg = <0xfda4d000 0x1000>;
+			interrupts = <0 65 0>;
+			qcom,iommu-ctx-sids = <1>;
+			label = "vfe1";
+		};
+
+		qcom,iommu-ctx@fda4e000 {
+			reg = <0xfda4e000 0x1000>;
+			interrupts = <0 66 0>;
+			qcom,iommu-ctx-sids = <2>;
+			label = "cpp";
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/msm8974-iommu.dtsi b/arch/arm/boot/dts/msm8974-iommu.dtsi
index a115fd8..184826e 100755
--- a/arch/arm/boot/dts/msm8974-iommu.dtsi
+++ b/arch/arm/boot/dts/msm8974-iommu.dtsi
@@ -10,108 +10,24 @@
  * GNU General Public License for more details.
  */
 
-/ {
-	jpeg: qcom,iommu@fda64000 {
-		compatible = "qcom,msm-smmu-v2";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-		reg = <0xfda64000 0x10000>;
-		vdd-supply = <&gdsc_jpeg>;
-		qcom,iommu-smt-size = <16>;
+/include/ "msm-iommu.dtsi"
 
-		qcom,iommu-ctx@fda6c000 {
-			reg = <0xfda6c000 0x1000>;
-			interrupts = <0 69 0>;
-			qcom,iommu-ctx-sids = <0>;
-			qcom,iommu-ctx-name = "jpeg_enc0";
-		};
-		qcom,iommu-ctx@fda6d000 {
-			reg = <0xfda6d000 0x1000>;
-			interrupts = <0 70 0>;
-			qcom,iommu-ctx-sids = <1>;
-			qcom,iommu-ctx-name = "jpeg_enc1";
-		};
-		qcom,iommu-ctx@fda6e000 {
-			reg = <0xfda6e000 0x1000>;
-			interrupts = <0 71 0>;
-			qcom,iommu-ctx-sids = <2>;
-			qcom,iommu-ctx-name = "jpeg_dec";
-		};
-	};
+&jpeg_iommu {
+	status = "ok";
+};
 
-	mdp: qcom,iommu@fd928000 {
-		compatible = "qcom,msm-smmu-v2";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-		reg = <0xfd928000 0x10000>;
-		vdd-supply = <&gdsc_mdss>;
-		qcom,iommu-smt-size = <16>;
+&mdp_iommu {
+	status = "ok";
+};
 
-		qcom,iommu-ctx@fd930000 {
-			reg = <0xfd930000 0x1000>;
-			interrupts = <0 74 0>;
-			qcom,iommu-ctx-sids = <0>;
-			qcom,iommu-ctx-name = "mdp_0";
-		};
-		qcom,iommu-ctx@fd931000 {
-			reg = <0xfd931000 0x1000>;
-			interrupts = <0 75 0>;
-			qcom,iommu-ctx-sids = <1>;
-			qcom,iommu-ctx-name = "mdp_1";
-		};
-	};
+&venus_iommu {
+	status = "ok";
+};
 
-	venus: qcom,iommu@fdc84000 {
-		compatible = "qcom,msm-smmu-v2";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-		reg = <0xfdc84000 0x10000>;
-		vdd-supply = <&gdsc_venus>;
-		qcom,iommu-smt-size = <16>;
+&kgsl_iommu {
+	status = "ok";
+};
 
-		qcom,iommu-ctx@fdc8c000 {
-			reg = <0xfdc8c000 0x1000>;
-			interrupts = <0 43 0>;
-			qcom,iommu-ctx-sids = <0 1 2 3 4 5>;
-			qcom,iommu-ctx-name = "venus_ns";
-		};
-		qcom,iommu-ctx@fdc8d000 {
-			reg = <0xfdc8d000 0x1000>;
-			interrupts = <0 42 0>;
-			qcom,iommu-ctx-sids = <0x80 0x81 0x82 0x83 0x84 0x85>;
-			qcom,iommu-ctx-name = "venus_cp";
-		};
-		qcom,iommu-ctx@fdc8e000 {
-			reg = <0xfdc8e000 0x1000>;
-			interrupts = <0 41 0>;
-			qcom,iommu-ctx-sids = <0xc0 0xc6>;
-			qcom,iommu-ctx-name = "venus_fw";
-		};
-	};
-
-	kgsl: qcom,iommu@fdb10000 {
-		compatible = "qcom,msm-smmu-v2";
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-		reg = <0xfdb10000 0x10000>;
-		vdd-supply = <&gdsc_oxili_cx>;
-		qcom,iommu-smt-size = <32>;
-
-		qcom,iommu-ctx@fdb18000 {
-			reg = <0xfdb18000 0x1000>;
-			interrupts = <0 240 0>;
-			qcom,iommu-ctx-sids = <0>;
-			qcom,iommu-ctx-name = "gfx3d_user";
-		};
-		qcom,iommu-ctx@fdb19000 {
-			reg = <0xfdb19000 0x1000>;
-			interrupts = <0 241 0>;
-			qcom,iommu-ctx-sids = <1>;
-			qcom,iommu-ctx-name = "gfx3d_priv";
-		};
-	};
+&vfe_iommu {
+	status = "ok";
 };
diff --git a/arch/arm/boot/dts/msm8974.dtsi b/arch/arm/boot/dts/msm8974.dtsi
index 719eb4e..edb57dd 100644
--- a/arch/arm/boot/dts/msm8974.dtsi
+++ b/arch/arm/boot/dts/msm8974.dtsi
@@ -400,6 +400,19 @@
 		compatible = "qcom,msm-dai-fe";
 	};
 
+	qcom,msm-dai-q6 {
+		compatible = "qcom,msm-dai-q6";
+		qcom,msm-dai-q6-sb-0-rx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16384>;
+		};
+
+		qcom,msm-dai-q6-sb-0-tx {
+			compatible = "qcom,msm-dai-q6-dev";
+			qcom,msm-dai-q6-dev-id = <16385>;
+		};
+	};
+
 	qcom,msm-auxpcm {
 		compatible = "qcom,msm-auxpcm-resource";
 		qcom,msm-cpudai-auxpcm-clk = "pcm_clk";
@@ -426,6 +439,14 @@
 		compatible = "qcom,msm-pcm-hostless";
 	};
 
+	qcom,msm-ocmem-audio {
+		compatible = "qcom,msm-ocmem-audio";
+		qcom,msm-ocmem-audio-src-id = <11>;
+		qcom,msm-ocmem-audio-dst-id = <604>;
+		qcom,msm-ocmem-audio-ab = <32505856>;
+		qcom,msm-ocmem-audio-ib = <32505856>;
+	};
+
 	qcom,mss@fc880000 {
 		compatible = "qcom,pil-q6v5-mss";
 		reg = <0xfc880000 0x100>,
@@ -557,6 +578,12 @@
 		qcom,firmware-max-paddr = <0xFA00000>;
 	};
 
+	qcom,cache_erp {
+		compatible = "qcom,cache_erp";
+		interrupts = <1 9 0>, <0 2 0>;
+		interrupt-names = "l1_irq", "l2_irq";
+	};
+
 	tsens@fc4a8000 {
 		compatible = "qcom,msm-tsens";
 		reg = <0xfc4a8000 0x2000>,
@@ -567,6 +594,28 @@
 		qcom,slope = <1134 1122 1142 1123 1176 1176 1176 1186 1176
 				1176 1176>;
 	};
+
+	qcom,msm-rtb {
+		compatible = "qcom,msm-rtb";
+		qcom,memory-reservation-type = "EBI1";
+		qcom,memory-reservation-size = <0x100000>; /* 1M EBI1 buffer */
+	};
+
+	qcom,qcedev@fd440000 {
+		compatible = "qcom,qcedev";
+		reg = <0xfd440000 0x20000>,
+		      <0xfd444000 0x8000>;
+		interrupts = <0 235 0>;
+		qcom,bam-pipes = <0>;
+	};
+
+	qcom,qcrypto@fd444000 {
+		compatible = "qcom,qcrypto";
+		reg = <0xfd440000 0x20000>,
+		      <0xfd444000 0x8000>;
+		interrupts = <0 235 0>;
+		qcom,bam-pipes = <1>;
+	};
 };
 
 /include/ "msm-pm8x41-rpm-regulator.dtsi"
diff --git a/arch/arm/boot/dts/msm9625.dts b/arch/arm/boot/dts/msm9625.dts
index 6c007fb..6b44be9 100644
--- a/arch/arm/boot/dts/msm9625.dts
+++ b/arch/arm/boot/dts/msm9625.dts
@@ -55,4 +55,14 @@
 		reg = <0xf991f000 0x1000>;
 		interrupts = <0 109 0>;
 	};
+
+	qcom,nand@f9ac0000 {
+		compatible = "qcom,msm-nand";
+		reg = <0xf9ac0000 0x1000>,
+		      <0xf9ac4000 0x8000>;
+		reg-names = "nand_phys",
+			    "bam_phys";
+		interrupts = <0 247 0>;
+		interrupt-names = "bam_irq";
+	};
 };
diff --git a/arch/arm/configs/msm8660-perf_defconfig b/arch/arm/configs/msm8660-perf_defconfig
index 173dcca..f3b2219 100644
--- a/arch/arm/configs/msm8660-perf_defconfig
+++ b/arch/arm/configs/msm8660-perf_defconfig
@@ -340,6 +340,7 @@
 CONFIG_FB_MSM_TRIPLE_BUFFER=y
 CONFIG_FB_MSM_MDP40=y
 CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
 CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
 CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
 CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
diff --git a/arch/arm/configs/msm8660_defconfig b/arch/arm/configs/msm8660_defconfig
index f1c0aaa..4748496 100644
--- a/arch/arm/configs/msm8660_defconfig
+++ b/arch/arm/configs/msm8660_defconfig
@@ -342,6 +342,7 @@
 CONFIG_FB_MSM_TRIPLE_BUFFER=y
 CONFIG_FB_MSM_MDP40=y
 CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
 CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
 CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
 CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
diff --git a/arch/arm/configs/msm8960-perf_defconfig b/arch/arm/configs/msm8960-perf_defconfig
index c2f4702..66e71fc 100644
--- a/arch/arm/configs/msm8960-perf_defconfig
+++ b/arch/arm/configs/msm8960-perf_defconfig
@@ -83,6 +83,7 @@
 CONFIG_MSM_GSS_SSR_8064=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_MSM_RPM_LOG=y
+CONFIG_MSM_RPM_RBCPR_STATS_LOG=y
 CONFIG_MSM_RPM_STATS_LOG=y
 CONFIG_MSM_BUS_SCALING=y
 CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED=y
@@ -367,6 +368,7 @@
 CONFIG_FB_MSM_TRIPLE_BUFFER=y
 CONFIG_FB_MSM_MDP40=y
 CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
 CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
 CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
 CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
diff --git a/arch/arm/configs/msm8960_defconfig b/arch/arm/configs/msm8960_defconfig
index a50485d..3731845 100644
--- a/arch/arm/configs/msm8960_defconfig
+++ b/arch/arm/configs/msm8960_defconfig
@@ -83,6 +83,7 @@
 CONFIG_MSM_TZ_LOG=y
 CONFIG_MSM_RPM_LOG=y
 CONFIG_MSM_RPM_STATS_LOG=y
+CONFIG_MSM_RPM_RBCPR_STATS_LOG=y
 CONFIG_MSM_BUS_SCALING=y
 CONFIG_MSM_BUS_RPM_MULTI_TIER_ENABLED=y
 CONFIG_MSM_WATCHDOG=y
@@ -370,6 +371,7 @@
 CONFIG_FB_MSM_TRIPLE_BUFFER=y
 CONFIG_FB_MSM_MDP40=y
 CONFIG_FB_MSM_OVERLAY=y
+CONFIG_FB_MSM_NO_MDP_PIPE_CTRL=y
 CONFIG_FB_MSM_OVERLAY0_WRITEBACK=y
 CONFIG_FB_MSM_OVERLAY1_WRITEBACK=y
 CONFIG_FB_MSM_WRITEBACK_MSM_PANEL=y
diff --git a/arch/arm/configs/msm8974_defconfig b/arch/arm/configs/msm8974_defconfig
index 63d2ced..b9f26d9 100644
--- a/arch/arm/configs/msm8974_defconfig
+++ b/arch/arm/configs/msm8974_defconfig
@@ -50,7 +50,6 @@
 CONFIG_MSM_PIL_PRONTO=y
 CONFIG_MSM_TZ_LOG=y
 CONFIG_MSM_DIRECT_SCLK_ACCESS=y
-CONFIG_MSM_WATCHDOG_V2=y
 CONFIG_MSM_OCMEM=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -171,6 +170,7 @@
 CONFIG_SND=y
 CONFIG_SND_SOC=y
 CONFIG_SND_SOC_MSM8974=y
+CONFIG_WCD9320_CODEC=y
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_STORAGE=y
@@ -199,6 +199,9 @@
 CONFIG_MMC_TEST=m
 CONFIG_MMC_MSM=y
 CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_DRV_MSM is not set
+CONFIG_RTC_DRV_QPNP=y
 CONFIG_STAGING=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
@@ -210,6 +213,7 @@
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_BAMDMA=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QPNP_POWER_ON=y
 CONFIG_MSM_IOMMU=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
@@ -245,3 +249,4 @@
 CONFIG_CRYPTO_DEV_QCEDEV=m
 CONFIG_CRC_CCITT=y
 CONFIG_LIBCRC32C=y
+CONFIG_MSM_BUS_SCALING=y
diff --git a/arch/arm/configs/msm9625_defconfig b/arch/arm/configs/msm9625_defconfig
index 7ca9f49..9094db7 100644
--- a/arch/arm/configs/msm9625_defconfig
+++ b/arch/arm/configs/msm9625_defconfig
@@ -50,6 +50,12 @@
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_SUSPEND is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_MTD_MSM_NAND is not set
+CONFIG_MTD_MSM_QPIC_NAND=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 # CONFIG_ANDROID_PMEM is not set
@@ -74,7 +80,8 @@
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
-# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_YAFFS_FS=y
+CONFIG_YAFFS_DISABLE_TAGS_ECC=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ASCII=y
 CONFIG_NLS_ISO8859_1=y
@@ -103,5 +110,4 @@
 CONFIG_CRYPTO_DEFLATE=y
 # CONFIG_CRYPTO_HW is not set
 CONFIG_CRC_CCITT=y
-CONFIG_CRC16=y
 CONFIG_LIBCRC32C=y
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 99ee2de..e32194f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -2,6 +2,7 @@
 #define __ASMARM_ARCH_TIMER_H
 
 #include <linux/ioport.h>
+#include <linux/clocksource.h>
 
 struct arch_timer {
 	struct resource	res[3];
@@ -10,6 +11,7 @@
 #ifdef CONFIG_ARM_ARCH_TIMER
 int arch_timer_register(struct arch_timer *);
 int arch_timer_of_register(void);
+cycle_t arch_counter_get_cntpct(void);
 #else
 static inline int arch_timer_register(struct arch_timer *at)
 {
@@ -20,6 +22,11 @@
 {
 	return -ENXIO;
 }
+
+static inline cycle_t arch_counter_get_cntpct(void)
+{
+	return 0;
+}
 #endif
 
 #endif
diff --git a/arch/arm/include/asm/mach/mmc.h b/arch/arm/include/asm/mach/mmc.h
index 562f13c..d341ea9 100644
--- a/arch/arm/include/asm/mach/mmc.h
+++ b/arch/arm/include/asm/mach/mmc.h
@@ -146,7 +146,7 @@
 	unsigned int uhs_caps2;
 	void (*sdio_lpm_gpio_setup)(struct device *, unsigned int);
         unsigned int status_irq;
-	unsigned int status_gpio;
+	int status_gpio;
 	/* Indicates the polarity of the GPIO line when card is inserted */
 	bool is_status_gpio_active_low;
         unsigned int sdiowakeup_irq;
@@ -158,7 +158,7 @@
 	unsigned int msmsdcc_fmax;
 	bool nonremovable;
 	unsigned int mpm_sdiowakeup_int;
-	unsigned int wpswitch_gpio;
+	int wpswitch_gpio;
 	bool is_wpswitch_active_low;
 	struct msm_mmc_slot_reg_data *vreg_data;
 	int is_sdio_al_client;
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 87bb7d3..43c627d 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -22,6 +22,7 @@
 #include <linux/of_address.h>
 #include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/export.h>
 
 #include <asm/cputype.h>
 #include <asm/localtimer.h>
@@ -315,10 +316,16 @@
 	return ((cycle_t) cvalh << 32) | cvall;
 }
 
-static cycle_t arch_counter_read(struct clocksource *cs)
+cycle_t arch_counter_get_cntpct(void)
 {
 	return arch_specific_timer->get_cntpct();
 }
+EXPORT_SYMBOL(arch_counter_get_cntpct);
+
+static cycle_t arch_counter_read(struct clocksource *cs)
+{
+	return arch_counter_get_cntpct();
+}
 
 #ifdef ARCH_HAS_READ_CURRENT_TIMER
 int read_current_timer(unsigned long *timer_val)
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 4f14698..54f3292 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -39,6 +39,7 @@
 	select MSM_RUN_QUEUE_STATS if MSM_SOC_REV_A
 	select DONT_MAP_HOLE_AFTER_MEMBANK0
 	select MIGHT_HAVE_CACHE_L2X0
+	select MSM_MODEM_RESTART
 
 config ARCH_MSM7X30
 	bool "MSM7x30"
@@ -65,6 +66,7 @@
 	select MULTI_IRQ_HANDLER
 	select MSM_PM2 if PM
 	select HOLES_IN_ZONE if SPARSEMEM
+	select MSM_MODEM_RESTART
 
 config ARCH_QSD8X50
 	bool "QSD8X50"
@@ -78,6 +80,7 @@
 	select MSM_GPIOMUX
 	select MSM_DALRPC
 	select MSM_PM2 if PM
+	select MSM_MODEM_RESTART
 
 config ARCH_MSM8X60
 	bool "MSM8X60"
@@ -695,20 +698,6 @@
        help
          Support for the Qualcomm MSM8x55 SVLTE SURF eval board.
 
-config MACH_MSM8X60_RUMI3
-	depends on ARCH_MSM8X60
-	default n
-	bool "MSM8x60 RUMI3"
-	help
-	  Support for the Qualcomm MSM8x60 RUMI3 emulator.
-
-config MACH_MSM8X60_SIM
-	depends on ARCH_MSM8X60
-	default n
-	bool "MSM8x60 Simulator"
-	help
-	  Support for the Qualcomm MSM8x60 simulator.
-
 config MACH_MSM8X60_SURF
 	depends on ARCH_MSM8X60
 	default n
@@ -754,18 +743,6 @@
 	help
 	  Support for the Qualcomm MSM8x60 Dragon board.
 
-config MACH_MSM8960_SIM
-	depends on ARCH_MSM8960
-	bool "MSM8960 Simulator"
-	help
-	  Support for the Qualcomm MSM8960 simulator.
-
-config MACH_MSM8960_RUMI3
-	depends on ARCH_MSM8960
-	bool "MSM8960 RUMI3"
-	help
-	  Support for the Qualcomm MSM8960 RUMI3 emulator.
-
 config MACH_MSM8960_CDP
 	depends on ARCH_MSM8960
 	bool "MSM8960 CDP"
@@ -840,18 +817,6 @@
 	  The two TSIF cores share the same DM configuration
 	  so they cannot be used simultaneously.
 
-config MACH_APQ8064_SIM
-	depends on ARCH_APQ8064
-	bool "APQ8064 Simulator"
-	help
-	  Support for the Qualcomm APQ8064 simulator.
-
-config MACH_APQ8064_RUMI3
-	depends on ARCH_APQ8064
-	bool "APQ8064 RUMI3"
-	help
-	  Support for the Qualcomm APQ8064 RUMI3 emulator.
-
 config MACH_APQ8064_CDP
 	depends on ARCH_APQ8064
 	bool "APQ8064 CDP"
@@ -2064,6 +2029,16 @@
 	  the low power modes that RPM enters. The drivers outputs the message
 	  via a debugfs node.
 
+config MSM_RPM_RBCPR_STATS_LOG
+	tristate "MSM Resource Power Manager RBCPR Stats Driver"
+	depends on DEBUG_FS
+	depends on MSM_RPM
+	help
+		This option enables a driver which reads RPM messages from a
+		shared memory location. These messages provide statistical
+		information about RBCPR (Rapid Bridge Core Power Reduction).
+		The driver outputs the messages via a debugfs node.
+
 config MSM_DIRECT_SCLK_ACCESS
 	bool "Direct access to the SCLK timer"
 	default n
@@ -2080,6 +2055,9 @@
 config MSM_NATIVE_RESTART
 	bool
 
+config MSM_MODEM_RESTART
+	bool
+
 config MSM_PM2
 	depends on PM
 	bool
@@ -2117,6 +2095,14 @@
 		deadlocks. It does not run during the bootup process, so it will
 		not catch any early lockups.
 
+config MSM_MEMORY_DUMP
+	bool "MSM Memory Dump Support"
+	help
+		This enables memory dump feature. It allows various client
+		subsystems to register respective dump regions. At the time
+		of deadlocks or cpu hangs these dump regions are captured to
+		give a snapshot of the system at the time of the crash.
+
 config MSM_DLOAD_MODE
 	bool "Enable download mode on crashes"
 	depends on ARCH_MSM8X60 || ARCH_MSM8960 || ARCH_MSM9615
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index aff2251..33b153f 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -229,8 +229,7 @@
 obj-$(CONFIG_MSM_WATCHDOG) += msm_watchdog.o
 obj-$(CONFIG_MSM_WATCHDOG) += msm_watchdog_asm.o
 obj-$(CONFIG_MSM_WATCHDOG_V2) += msm_watchdog_v2.o
-obj-$(CONFIG_MACH_MSM8X60_RUMI3) += board-msm8x60.o
-obj-$(CONFIG_MACH_MSM8X60_SIM) += board-msm8x60.o
+obj-$(CONFIG_MSM_MEMORY_DUMP) += msm_memory_dump.o
 obj-$(CONFIG_MACH_MSM8X60_SURF) += board-msm8x60.o
 obj-$(CONFIG_MACH_MSM8X60_FFA) += board-msm8x60.o
 obj-$(CONFIG_MACH_MSM8X60_FLUID) += board-msm8x60.o
@@ -274,8 +273,6 @@
 board-8960-all-objs += board-8960.o board-8960-camera.o board-8960-display.o board-8960-pmic.o board-8960-storage.o board-8960-gpiomux.o
 board-8930-all-objs += board-8930.o board-8930-camera.o board-8930-display.o board-8930-pmic.o board-8930-storage.o board-8930-gpiomux.o devices-8930.o board-8930-gpu.o
 board-8064-all-objs += board-8064.o board-8064-pmic.o board-8064-storage.o board-8064-gpiomux.o board-8064-camera.o board-8064-display.o board-8064-gpu.o
-obj-$(CONFIG_MACH_MSM8960_SIM) += board-8960-all.o board-8960-regulator.o
-obj-$(CONFIG_MACH_MSM8960_RUMI3) += board-8960-all.o board-8960-regulator.o
 obj-$(CONFIG_MACH_MSM8960_CDP) += board-8960-all.o board-8960-regulator.o
 obj-$(CONFIG_MACH_MSM8960_MTP) += board-8960-all.o board-8960-regulator.o
 obj-$(CONFIG_MACH_MSM8960_FLUID) += board-8960-all.o board-8960-regulator.o
@@ -283,8 +280,6 @@
 obj-$(CONFIG_MACH_MSM8930_MTP) += board-8930-all.o board-8930-regulator.o
 obj-$(CONFIG_MACH_MSM8930_FLUID) += board-8930-all.o board-8930-regulator.o
 obj-$(CONFIG_PM8921_BMS) += bms-batterydata.o bms-batterydata-desay.o
-obj-$(CONFIG_MACH_APQ8064_SIM) += board-8064-all.o board-8064-regulator.o
-obj-$(CONFIG_MACH_APQ8064_RUMI3) += board-8064-all.o board-8064-regulator.o
 obj-$(CONFIG_MACH_APQ8064_CDP) += board-8064-all.o board-8064-regulator.o
 obj-$(CONFIG_MACH_APQ8064_MTP) += board-8064-all.o board-8064-regulator.o
 obj-$(CONFIG_MACH_APQ8064_LIQUID) += board-8064-all.o board-8064-regulator.o
@@ -297,7 +292,7 @@
 obj-$(CONFIG_ARCH_MSM8974) += clock-local2.o clock-pll.o clock-8974.o clock-rpm.o clock-voter.o
 obj-$(CONFIG_ARCH_MSM8974) += gdsc.o
 obj-$(CONFIG_ARCH_MSM9625) += board-9625.o board-9625-gpiomux.o
-obj-$(CONFIG_ARCH_MSM8930) += acpuclock-8930.o acpuclock-8627.o
+obj-$(CONFIG_ARCH_MSM8930) += acpuclock-8930.o acpuclock-8627.o acpuclock-8930aa.o
 
 obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire.o board-sapphire-gpio.o
 obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-keypad.o board-sapphire-panel.o
@@ -328,6 +323,7 @@
 obj-$(CONFIG_MSM_MPM_OF) += mpm-of.o
 obj-$(CONFIG_MSM_MPM) += mpm.o
 obj-$(CONFIG_MSM_RPM_STATS_LOG) += rpm_stats.o
+obj-$(CONFIG_MSM_RPM_RBCPR_STATS_LOG) += rpm_rbcpr_stats.o
 obj-$(CONFIG_MSM_RPM_LOG) += rpm_log.o
 obj-$(CONFIG_MSM_TZ_LOG) += tz_log.o
 obj-$(CONFIG_MSM_XO) += msm_xo.o
@@ -362,6 +358,7 @@
 obj-$(CONFIG_MSM_FAKE_BATTERY) += fish_battery.o
 obj-$(CONFIG_MSM_RPC_VIBRATOR) += msm_vibrator.o
 obj-$(CONFIG_MSM_NATIVE_RESTART) += restart.o
+obj-$(CONFIG_MSM_MODEM_RESTART) += restart_7k.o
 
 obj-$(CONFIG_MSM_PROC_COMM_REGULATOR) += proccomm-regulator.o
 ifdef CONFIG_MSM_PROC_COMM_REGULATOR
diff --git a/arch/arm/mach-msm/acpuclock-7627.c b/arch/arm/mach-msm/acpuclock-7627.c
index f9ff226..639cc94 100644
--- a/arch/arm/mach-msm/acpuclock-7627.c
+++ b/arch/arm/mach-msm/acpuclock-7627.c
@@ -38,11 +38,15 @@
 
 #include "smd_private.h"
 #include "acpuclock.h"
+#include "clock.h"
 
 #define A11S_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100)
 #define A11S_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)
 #define A11S_VDD_SVS_PLEVEL_ADDR (MSM_CSR_BASE + 0x124)
 
+#define PLL4_L_VAL_ADDR		(MSM_CLK_CTL_BASE + 0x378)
+#define PLL4_M_VAL_ADDR		(MSM_CLK_CTL_BASE + 0x37C)
+#define PLL4_N_VAL_ADDR		(MSM_CLK_CTL_BASE + 0x380)
 
 #define POWER_COLLAPSE_KHZ 19200
 
@@ -67,6 +71,12 @@
 	const char *name;
 };
 
+struct pll_config {
+	unsigned int l;
+	unsigned int m;
+	unsigned int n;
+};
+
 static struct acpu_clk_src pll_clk[ACPU_PLL_END] = {
 	[ACPU_PLL_0] = { .name = "pll0_clk" },
 	[ACPU_PLL_1] = { .name = "pll1_clk" },
@@ -74,6 +84,13 @@
 	[ACPU_PLL_4] = { .name = "pll4_clk" },
 };
 
+static struct pll_config pll4_cfg_tbl[] = {
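+	/* L, M, N: with the 19.2MHz TCXO reference, rate = 19.2 * (L + M/N) MHz */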
+	{  36, 1, 2 }, /*  700.8 MHz */
+	{  52, 1, 2 }, /* 1008 MHz */
+	{  63, 0, 1 }, /* 1209.6 MHz */
+	{  73, 0, 1 }, /* 1401.6 MHz */
+};
+
 struct clock_state {
 	struct clkctl_acpu_speed	*current_speed;
 	struct mutex			lock;
@@ -91,15 +108,20 @@
 	unsigned int	ahbclk_div;
 	int		vdd;
 	unsigned int	axiclk_khz;
+	struct pll_config *pll_rate;
 	unsigned long   lpj; /* loops_per_jiffy */
 	/* Pointers in acpu_freq_tbl[] for max up/down steppings. */
 	struct clkctl_acpu_speed *down[ACPU_PLL_END];
 	struct clkctl_acpu_speed *up[ACPU_PLL_END];
 };
 
+static bool dynamic_reprogram;
 static struct clock_state drv_state = { 0 };
 static struct clkctl_acpu_speed *acpu_freq_tbl;
 
+/* Switch to this when reprogramming PLL4 */
+static struct clkctl_acpu_speed *backup_s;
+
 /*
  * ACPU freq tables used for different PLLs frequency combinations. The
  * correct table is selected during init.
@@ -119,7 +141,7 @@
 	{ 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 5, 160000 },
 	{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 7, 200000 },
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7627 with CDMA capable modem */
@@ -133,7 +155,7 @@
 	{ 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 5, 160000 },
 	{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 7, 200000 },
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7627 with GSM capable modem - PLL2 @ 800 */
@@ -147,7 +169,7 @@
 	{ 0, 400000, ACPU_PLL_2, 2, 1, 133333, 2, 5, 160000 },
 	{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
 	{ 1, 800000, ACPU_PLL_2, 2, 0, 200000, 3, 7, 200000 },
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7627 with CDMA capable modem - PLL2 @ 800 */
@@ -161,7 +183,7 @@
 	{ 0, 400000, ACPU_PLL_2, 2, 1, 133333, 2, 5, 160000 },
 	{ 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 160000 },
 	{ 1, 800000, ACPU_PLL_2, 2, 0, 200000, 3, 7, 200000 },
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7627a PLL2 @ 1200MHz with GSM capable modem */
@@ -176,7 +198,7 @@
 	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7627a PLL2 @ 1200MHz with CDMA capable modem */
@@ -191,7 +213,7 @@
 	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 120000 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7627aa PLL4 @ 1008MHz with GSM capable modem */
@@ -206,7 +228,7 @@
 	{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7627aa PLL4 @ 1008MHz with CDMA capable modem */
@@ -221,7 +243,7 @@
 	{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 8625 PLL4 @ 1209MHz with GSM capable modem */
@@ -235,7 +257,7 @@
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 0, 604800, ACPU_PLL_4, 6, 1, 75600, 3, 6, 160000 },
 	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 7, 200000},
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 8625 PLL4 @ 1209MHz with CDMA capable modem */
@@ -249,7 +271,40 @@
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 0, 604800, ACPU_PLL_4, 6, 1, 75600, 3, 6, 160000 },
 	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 7, 200000},
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
+};
+
+/* 8625 PLL4 @ 1401.6MHz with GSM capable modem */
+static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200_pll4_1401[] = {
+	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 30720 },
+	{ 0, 61440, ACPU_PLL_1, 1, 3,  7680, 3, 0, 61440 },
+	{ 0, 122880, ACPU_PLL_1, 1, 1,  15360, 3, 1, 61440 },
+	{ 1, 245760, ACPU_PLL_1, 1, 0, 30720, 3, 1, 61440 },
+	{ 0, 300000, ACPU_PLL_2, 2, 3, 37500, 3, 2, 122880 },
+	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 2, 122880 },
+	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 3, 122880 },
+	{ 0, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 4, 160000 },
+	{ 1, 700800, ACPU_PLL_4, 6, 0, 87500, 3, 4, 160000, &pll4_cfg_tbl[0]},
+	{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 5, 200000, &pll4_cfg_tbl[1]},
+	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 6, 200000, &pll4_cfg_tbl[2]},
+	{ 1, 1401600, ACPU_PLL_4, 6, 0, 175000, 3, 7, 200000, &pll4_cfg_tbl[3]},
+	{ 0 }
+};
+
+/* 8625 PLL4 @ 1401.6MHz with CDMA capable modem */
+static struct clkctl_acpu_speed pll0_960_pll1_196_pll2_1200_pll4_1401[] = {
+	{ 0, 19200, ACPU_PLL_TCXO, 0, 0, 2400, 3, 0, 24576 },
+	{ 0, 65536, ACPU_PLL_1, 1, 3,  8192, 3, 1, 49152 },
+	{ 0, 98304, ACPU_PLL_1, 1, 1,  12288, 3, 2, 49152 },
+	{ 1, 196608, ACPU_PLL_1, 1, 0, 24576, 3, 3, 98304 },
+	{ 1, 320000, ACPU_PLL_0, 4, 2, 40000, 3, 2, 122880 },
+	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 3, 122880 },
+	{ 0, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 4, 160000 },
+	{ 1, 700800, ACPU_PLL_4, 6, 0, 87500, 3, 4, 160000, &pll4_cfg_tbl[0]},
+	{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 5, 200000, &pll4_cfg_tbl[1]},
+	{ 1, 1209600, ACPU_PLL_4, 6, 0, 151200, 3, 6, 200000, &pll4_cfg_tbl[2]},
+	{ 1, 1401600, ACPU_PLL_4, 6, 0, 175000, 3, 7, 200000, &pll4_cfg_tbl[3]},
+	{ 0 }
 };
 
 /* 8625 PLL4 @ 1152MHz with GSM capable modem */
@@ -263,7 +318,7 @@
 	{ 0, 576000, ACPU_PLL_4, 6, 1, 72000, 3, 6, 160000 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 1, 1152000, ACPU_PLL_4, 6, 0, 144000, 3, 7, 200000},
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 8625 PLL4 @ 1115MHz with CDMA capable modem */
@@ -277,7 +332,7 @@
 	{ 0, 576000, ACPU_PLL_4, 6, 1, 72000, 3, 6, 160000 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 1, 1152000, ACPU_PLL_4, 6, 0, 144000, 3, 7, 200000},
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 
@@ -292,7 +347,7 @@
 	{ 0, 400000, ACPU_PLL_2, 2, 2, 50000, 3, 4, 122880 },
 	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 200000 },
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7627a PLL2 @ 1200MHz with GSM capable modem */
@@ -307,7 +362,7 @@
 	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7627a PLL2 @ 1200MHz with CDMA capable modem */
@@ -322,7 +377,7 @@
 	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 120000 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 1, 800000, ACPU_PLL_4, 6, 0, 100000, 3, 7, 200000 },
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7627aa PLL4 @ 1008MHz with GSM capable modem */
@@ -337,7 +392,7 @@
 	{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7627aa PLL4 @ 1008MHz with CDMA capable modem */
@@ -352,7 +407,7 @@
 	{ 0, 504000, ACPU_PLL_4, 6, 1, 63000, 3, 6, 160000 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 160000 },
 	{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 /* 7625a PLL2 @ 1200MHz with GSM capable modem */
@@ -366,7 +421,7 @@
 	{ 0, 400000, ACPU_PLL_2, 2, 2, 50000, 3, 4, 122880 },
 	{ 1, 480000, ACPU_PLL_0, 4, 1, 60000, 3, 5, 122880 },
 	{ 1, 600000, ACPU_PLL_2, 2, 1, 75000, 3, 6, 200000 },
-	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
+	{ 0 }
 };
 
 #define PLL_CONFIG(m0, m1, m2, m4) { \
@@ -399,6 +454,8 @@
 	PLL_CONFIG(960, 196, 1200, 1209),
 	PLL_CONFIG(960, 245, 1200, 1152),
 	PLL_CONFIG(960, 196, 1200, 1152),
+	PLL_CONFIG(960, 245, 1200, 1401),
+	PLL_CONFIG(960, 196, 1200, 1401),
 	{ 0, 0, 0, 0, 0 }
 };
 
@@ -439,6 +496,31 @@
 }
 #endif
 
+static void update_jiffies(int cpu, unsigned long loops)
+{
+#ifdef CONFIG_SMP
+	for_each_possible_cpu(cpu) {
+		per_cpu(cpu_data, cpu).loops_per_jiffy =
+						loops;
+	}
+#endif
+	/* Adjust the global one */
+	loops_per_jiffy = loops;
+}
+
+/* Assumes PLL4 is off and the acpuclock isn't sourced from PLL4 */
+static void acpuclk_config_pll4(struct pll_config *pll)
+{
+	/* Make sure write to disable PLL_4 has completed
+	 * before reconfiguring that PLL. */
+	mb();
+	writel_relaxed(pll->l, PLL4_L_VAL_ADDR);
+	writel_relaxed(pll->m, PLL4_M_VAL_ADDR);
+	writel_relaxed(pll->n, PLL4_N_VAL_ADDR);
+	/* Make sure PLL is programmed before returning. */
+	mb();
+}
+
 static int acpuclk_set_vdd_level(int vdd)
 {
 	uint32_t current_vdd;
@@ -524,6 +606,7 @@
 	struct clkctl_acpu_speed *cur_s, *tgt_s, *strt_s;
 	int res, rc = 0;
 	unsigned int plls_enabled = 0, pll;
+	int delta;
 
 	if (reason == SETRATE_CPUFREQ)
 		mutex_lock(&drv_state.lock);
@@ -592,6 +675,61 @@
 	pr_debug("Switching from ACPU rate %u KHz -> %u KHz\n",
 		       strt_s->a11clk_khz, tgt_s->a11clk_khz);
 
+	delta = abs((int)(strt_s->a11clk_khz - tgt_s->a11clk_khz));
+
+	if (dynamic_reprogram) {
+		if (tgt_s->pll == ACPU_PLL_4) {
+			if (strt_s->pll == ACPU_PLL_4 ||
+					delta > drv_state.max_speed_delta_khz) {
+				/*
+				 * Enable the backup PLL if required
+				 * and switch to it.
+				 */
+				clk_enable(pll_clk[backup_s->pll].clk);
+				acpuclk_set_div(backup_s);
+			}
+			/* Make sure PLL4 is off before reprogramming */
+			if ((plls_enabled & (1 << tgt_s->pll))) {
+				clk_disable(pll_clk[tgt_s->pll].clk);
+				plls_enabled &= ~(1 << tgt_s->pll);
+			}
+			acpuclk_config_pll4(tgt_s->pll_rate);
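+			/* Keep the clock driver's cached rate in sync. */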
+			pll_clk[tgt_s->pll].clk->rate = tgt_s->a11clk_khz*1000;
+
+		} else if (strt_s->pll == ACPU_PLL_4) {
+			if (delta > drv_state.max_speed_delta_khz) {
+				/*
+				 * Enable the backup PLL if required
+				 * and switch to it.
+				 */
+				clk_enable(pll_clk[backup_s->pll].clk);
+				acpuclk_set_div(backup_s);
+			}
+		}
+
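+		/* (Re)enable the target PLL before switching to it. */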
+		if (!(plls_enabled & (1 << tgt_s->pll))) {
+			rc = clk_enable(pll_clk[tgt_s->pll].clk);
+			if (rc < 0) {
+				pr_err("PLL%d enable failed (%d)\n",
+					tgt_s->pll, rc);
+				goto out;
+			}
+			plls_enabled |= 1 << tgt_s->pll;
+		}
+		acpuclk_set_div(tgt_s);
+		drv_state.current_speed = tgt_s;
+		/* Re-adjust lpj for the new clock speed. */
+		update_jiffies(cpu, tgt_s->lpj);
+
+		/* Disable the backup PLL */
+		if ((delta > drv_state.max_speed_delta_khz)
+				|| (strt_s->pll == ACPU_PLL_4 &&
+					tgt_s->pll == ACPU_PLL_4))
+			clk_disable(pll_clk[backup_s->pll].clk);
+
+		goto done;
+	}
+
 	while (cur_s != tgt_s) {
 		/*
 		 * Always jump to target freq if within max_speed_delta_khz,
@@ -648,17 +786,10 @@
 		acpuclk_set_div(cur_s);
 		drv_state.current_speed = cur_s;
 		/* Re-adjust lpj for the new clock speed. */
-#ifdef CONFIG_SMP
-		for_each_possible_cpu(cpu) {
-			per_cpu(cpu_data, cpu).loops_per_jiffy =
-							cur_s->lpj;
-		}
-#endif
-		/* Adjust the global one */
-		loops_per_jiffy = cur_s->lpj;
+		update_jiffies(cpu, cur_s->lpj);
 
 	}
-
+done:
 	/* Nothing else to do for SWFI. */
 	if (reason == SETRATE_SWFI)
 		goto out;
@@ -781,7 +912,7 @@
 static void __devinit select_freq_plan(void)
 {
 	unsigned long pll_mhz[ACPU_PLL_END];
-	struct pll_freq_tbl_map *t;
+	struct pll_freq_tbl_map *t = acpu_freq_tbl_list;
 	int i;
 
 	/* Get PLL clocks */
@@ -817,7 +948,7 @@
 		}
 	} else {
 		/* Select the right table to use. */
-		for (t = acpu_freq_tbl_list; t->tbl != 0; t++) {
+		for (; t->tbl != 0; t++) {
 			if (t->pll0_rate == pll_mhz[ACPU_PLL_0]
 				&& t->pll1_rate == pll_mhz[ACPU_PLL_1]
 				&& t->pll2_rate == pll_mhz[ACPU_PLL_2]
@@ -828,6 +959,25 @@
 		}
 	}
 
+	/*
+	 * When PLL4 can run at a maximum of 1401.6MHz, we have to
+	 * support dynamic reprogramming of PLL4.
+	 *
+	 * Also find the backup PLL used during PLL4 reprogramming.
+	 * PLL2 @ 600MHz is used as the backup PLL, since an 800MHz
+	 * jump from it is acceptable.
+	 */
+	if (t->pll4_rate == 1401) {
+		dynamic_reprogram = 1;
+		for ( ; t->tbl->a11clk_khz; t->tbl++) {
+			if (t->tbl->pll == ACPU_PLL_2 &&
+					t->tbl->a11clk_src_div == 1) {
+				backup_s = t->tbl;
+				break;
+			}
+		}
+	}
+
 	if (acpu_freq_tbl == NULL) {
 		pr_crit("Unknown PLL configuration!\n");
 		BUG();
diff --git a/arch/arm/mach-msm/acpuclock-8064.c b/arch/arm/mach-msm/acpuclock-8064.c
index d46d268..6f9960d 100644
--- a/arch/arm/mach-msm/acpuclock-8064.c
+++ b/arch/arm/mach-msm/acpuclock-8064.c
@@ -31,10 +31,12 @@
 	.has_droop_ctl = true,
 	.droop_offset = 0x14,
 	.droop_val = 0x0108C000,
-	.low_vdd_l_max = 40,
-	.vdd[HFPLL_VDD_NONE] = 0,
-	.vdd[HFPLL_VDD_LOW]  = 945000,
+	.low_vdd_l_max = 22,
+	.nom_vdd_l_max = 42,
+	.vdd[HFPLL_VDD_NONE] =       0,
+	.vdd[HFPLL_VDD_LOW]  =  945000,
 	.vdd[HFPLL_VDD_NOM]  = 1050000,
+	.vdd[HFPLL_VDD_HIGH] = 1150000,
 };
 
 static struct scalable scalable[] __initdata = {
@@ -43,7 +45,7 @@
 		.aux_clk_sel_phys = 0x02088014,
 		.aux_clk_sel = 3,
 		.l2cpmr_iaddr = 0x4501,
-		.vreg[VREG_CORE] = { "krait0", 1300000, 1740000 },
+		.vreg[VREG_CORE] = { "krait0", 1300000 },
 		.vreg[VREG_MEM]  = { "krait0_mem", 1150000 },
 		.vreg[VREG_DIG]  = { "krait0_dig", 1150000 },
 		.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
@@ -53,7 +55,7 @@
 		.aux_clk_sel_phys = 0x02098014,
 		.aux_clk_sel = 3,
 		.l2cpmr_iaddr = 0x5501,
-		.vreg[VREG_CORE] = { "krait1", 1300000, 1740000 },
+		.vreg[VREG_CORE] = { "krait1", 1300000 },
 		.vreg[VREG_MEM]  = { "krait1_mem", 1150000 },
 		.vreg[VREG_DIG]  = { "krait1_dig", 1150000 },
 		.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
@@ -63,7 +65,7 @@
 		.aux_clk_sel_phys = 0x020A8014,
 		.aux_clk_sel = 3,
 		.l2cpmr_iaddr = 0x6501,
-		.vreg[VREG_CORE] = { "krait2", 1300000, 1740000 },
+		.vreg[VREG_CORE] = { "krait2", 1300000 },
 		.vreg[VREG_MEM]  = { "krait2_mem", 1150000 },
 		.vreg[VREG_DIG]  = { "krait2_dig", 1150000 },
 		.vreg[VREG_HFPLL_A] = { "krait2_hfpll", 1800000 },
@@ -73,7 +75,7 @@
 		.aux_clk_sel_phys = 0x020B8014,
 		.aux_clk_sel = 3,
 		.l2cpmr_iaddr = 0x7501,
-		.vreg[VREG_CORE] = { "krait3", 1300000, 1740000 },
+		.vreg[VREG_CORE] = { "krait3", 1300000 },
 		.vreg[VREG_MEM]  = { "krait3_mem", 1150000 },
 		.vreg[VREG_DIG]  = { "krait3_dig", 1150000 },
 		.vreg[VREG_HFPLL_A] = { "krait3_hfpll", 1800000 },
@@ -104,111 +106,107 @@
 };
 
 static struct l2_level l2_freq_tbl[] __initdata = {
-	[0]  = { {STBY_KHZ, QSB,   0, 0, 0x00 }, 1050000, 1050000, 0 },
-	[1]  = { {  384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
-	[2]  = { {  432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
-	[3]  = { {  486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
-	[4]  = { {  540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
-	[5]  = { {  594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
-	[6]  = { {  648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
-	[7]  = { {  702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
-	[8]  = { {  756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
-	[9]  = { {  810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
-	[10] = { {  864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
-	[11] = { {  918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 5 },
-	[12] = { {  972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 5 },
-	[13] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 5 },
-	[14] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 5 },
-	[15] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 5 },
+	[0]  = { {  384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
+	[1]  = { {  432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
+	[2]  = { {  486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
+	[3]  = { {  540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
+	[4]  = { {  594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
+	[5]  = { {  648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
+	[6]  = { {  702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
+	[7]  = { {  756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
+	[8]  = { {  810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
+	[9]  = { {  864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
+	[10] = { {  918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 5 },
+	[11] = { {  972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 5 },
+	[12] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 5 },
+	[13] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 5 },
+	[14] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 5 },
 };
 
 static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
-	{ 0, { STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),   950000 },
-	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(1),   950000 },
-	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(7),   975000 },
-	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(7),   975000 },
-	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(7),  1000000 },
-	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(7),  1000000 },
-	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(7),  1025000 },
-	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(7),  1025000 },
-	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(7),  1075000 },
-	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(7),  1075000 },
-	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(7),  1100000 },
-	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(7),  1100000 },
-	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(7),  1125000 },
-	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(7),  1125000 },
-	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1175000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1175000 },
-	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1200000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1200000 },
-	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1225000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1225000 },
-	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1237500 },
-	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(15), 1237500 },
-	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(15), 1250000 },
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   950000 },
+	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(6),   975000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(6),   975000 },
+	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(6),  1000000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(6),  1000000 },
+	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(6),  1025000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(6),  1025000 },
+	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(6),  1075000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(6),  1075000 },
+	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(6),  1100000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(6),  1100000 },
+	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(6),  1125000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(6),  1125000 },
+	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(14), 1175000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(14), 1175000 },
+	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(14), 1200000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(14), 1200000 },
+	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(14), 1225000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(14), 1225000 },
+	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(14), 1237500 },
+	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(14), 1237500 },
+	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(14), 1250000 },
 	{ 0, { 0 } }
 };
 
 static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
-	{ 0, { STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),   900000 },
-	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(1),   900000 },
-	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(7),   925000 },
-	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(7),   925000 },
-	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(7),   950000 },
-	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(7),   950000 },
-	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(7),   975000 },
-	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(7),   975000 },
-	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(7),  1025000 },
-	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(7),  1025000 },
-	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(7),  1050000 },
-	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(7),  1050000 },
-	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(7),  1075000 },
-	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(7),  1075000 },
-	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1125000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1125000 },
-	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1150000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1150000 },
-	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1175000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1175000 },
-	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1187500 },
-	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(15), 1187500 },
-	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(15), 1200000 },
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   900000 },
+	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(6),   925000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(6),   925000 },
+	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(6),   950000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(6),   950000 },
+	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(6),   975000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(6),   975000 },
+	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(6),  1025000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(6),  1025000 },
+	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(6),  1050000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(6),  1050000 },
+	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(6),  1075000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(6),  1075000 },
+	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(14), 1125000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(14), 1125000 },
+	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(14), 1150000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(14), 1150000 },
+	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(14), 1175000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(14), 1175000 },
+	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(14), 1187500 },
+	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(14), 1187500 },
+	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(14), 1200000 },
 	{ 0, { 0 } }
 };
 
 static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
-	{ 0, { STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),   850000 },
-	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(1),   850000 },
-	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(7),   875000 },
-	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(7),   875000 },
-	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(7),   900000 },
-	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(7),   900000 },
-	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(7),   925000 },
-	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(7),   925000 },
-	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(7),   975000 },
-	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(7),   975000 },
-	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(7),  1000000 },
-	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(7),  1000000 },
-	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(7),  1025000 },
-	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(7),  1025000 },
-	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1075000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1075000 },
-	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1100000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1100000 },
-	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1125000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1125000 },
-	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1137500 },
-	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(15), 1137500 },
-	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(15), 1150000 },
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   850000 },
+	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(6),   875000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(6),   875000 },
+	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(6),   900000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(6),   900000 },
+	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(6),   925000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(6),   925000 },
+	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(6),   975000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(6),   975000 },
+	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(6),  1000000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(6),  1000000 },
+	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(6),  1025000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(6),  1025000 },
+	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(14), 1075000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(14), 1075000 },
+	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(14), 1100000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(14), 1100000 },
+	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(14), 1125000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(14), 1125000 },
+	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(14), 1137500 },
+	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(14), 1137500 },
+	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(14), 1150000 },
 	{ 0, { 0 } }
 };
 
 static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
-	[PVS_SLOW]    = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow) },
-	[PVS_NOMINAL] = { acpu_freq_tbl_nom,  sizeof(acpu_freq_tbl_nom)  },
-	[PVS_FAST]    = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast) },
-	/* TODO: update the faster table when data is available */
-	[PVS_FASTER]  = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast) },
+[PVS_SLOW]    = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow),     0 },
+[PVS_NOMINAL] = { acpu_freq_tbl_nom,  sizeof(acpu_freq_tbl_nom),  25000 },
+[PVS_FAST]    = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
+/* TODO: update the faster table when data is available */
+[PVS_FASTER]  = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
 };
 
 static struct acpuclk_krait_params acpuclk_8064_params __initdata = {
@@ -220,6 +218,7 @@
 	.l2_freq_tbl_size = sizeof(l2_freq_tbl),
 	.bus_scale = &bus_scale_data,
 	.qfprom_phys_base = 0x00700000,
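+	/* Standby rate, replacing the removed STBY_KHZ table rows. */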
+	.stby_khz = 384000,
 };
 
 static int __init acpuclk_8064_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/acpuclock-8627.c b/arch/arm/mach-msm/acpuclock-8627.c
index 8060803..1642dae 100644
--- a/arch/arm/mach-msm/acpuclock-8627.c
+++ b/arch/arm/mach-msm/acpuclock-8627.c
@@ -37,10 +37,12 @@
 	.has_droop_ctl = true,
 	.droop_offset = 0x14,
 	.droop_val = 0x0108C000,
-	.low_vdd_l_max = 40,
+	.low_vdd_l_max = 22,
+	.nom_vdd_l_max = 42,
 	.vdd[HFPLL_VDD_NONE] = LVL_NONE,
 	.vdd[HFPLL_VDD_LOW]  = LVL_LOW,
 	.vdd[HFPLL_VDD_NOM]  = LVL_NOM,
+	.vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
 };
 
 static struct scalable scalable[] __initdata = {
@@ -49,7 +51,7 @@
 		.aux_clk_sel_phys = 0x02088014,
 		.aux_clk_sel = 3,
 		.l2cpmr_iaddr = 0x4501,
-		.vreg[VREG_CORE] = { "krait0", 1300000, 1740000 },
+		.vreg[VREG_CORE] = { "krait0", 1300000 },
 		.vreg[VREG_MEM]  = { "krait0_mem", 1150000 },
 		.vreg[VREG_DIG]  = { "krait0_dig", 1150000 },
 		.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
@@ -59,7 +61,7 @@
 		.aux_clk_sel_phys = 0x02098014,
 		.aux_clk_sel = 3,
 		.l2cpmr_iaddr = 0x5501,
-		.vreg[VREG_CORE] = { "krait1", 1300000, 1740000 },
+		.vreg[VREG_CORE] = { "krait1", 1300000 },
 		.vreg[VREG_MEM]  = { "krait1_mem", 1150000 },
 		.vreg[VREG_DIG]  = { "krait1_dig", 1150000 },
 		.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
@@ -90,43 +92,41 @@
 
 /* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
 static struct l2_level l2_freq_tbl[] __initdata = {
-	[0]  = { {STBY_KHZ, QSB,   0, 0, 0x00 },  LVL_NOM, 1050000, 0 },
-	[1]  = { {  384000, PLL_8, 0, 2, 0x00 },  LVL_NOM, 1050000, 1 },
-	[2]  = { {  432000, HFPLL, 2, 0, 0x20 },  LVL_NOM, 1050000, 1 },
-	[3]  = { {  486000, HFPLL, 2, 0, 0x24 },  LVL_NOM, 1050000, 1 },
-	[4]  = { {  540000, HFPLL, 2, 0, 0x28 },  LVL_NOM, 1050000, 2 },
-	[5]  = { {  594000, HFPLL, 1, 0, 0x16 },  LVL_NOM, 1050000, 2 },
-	[6]  = { {  648000, HFPLL, 1, 0, 0x18 },  LVL_NOM, 1050000, 2 },
-	[7]  = { {  702000, HFPLL, 1, 0, 0x1A },  LVL_NOM, 1050000, 3 },
-	[8]  = { {  756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 3 },
-	[9]  = { {  810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 3 },
-	[10] = { {  864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
-	[11] = { {  918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 4 },
-	[12] = { {  972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 4 },
+	[0]  = { {  384000, PLL_8, 0, 2, 0x00 },  LVL_NOM, 1050000, 1 },
+	[1]  = { {  432000, HFPLL, 2, 0, 0x20 },  LVL_NOM, 1050000, 1 },
+	[2]  = { {  486000, HFPLL, 2, 0, 0x24 },  LVL_NOM, 1050000, 1 },
+	[3]  = { {  540000, HFPLL, 2, 0, 0x28 },  LVL_NOM, 1050000, 2 },
+	[4]  = { {  594000, HFPLL, 1, 0, 0x16 },  LVL_NOM, 1050000, 2 },
+	[5]  = { {  648000, HFPLL, 1, 0, 0x18 },  LVL_NOM, 1050000, 2 },
+	[6]  = { {  702000, HFPLL, 1, 0, 0x1A },  LVL_NOM, 1050000, 3 },
+	[7]  = { {  756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 3 },
+	[8]  = { {  810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 3 },
+	[9]  = { {  864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
+	[10] = { {  918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 4 },
+	[11] = { {  972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 4 },
 };
 
 /* TODO: Update core voltages when data is available. */
 static struct acpu_level acpu_freq_tbl[] __initdata = {
-	{ 0, { STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),   900000 },
-	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(1),   900000 },
-	{ 1, {   432000, HFPLL, 2, 0, 0x20 }, L2(5),   925000 },
-	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(5),   925000 },
-	{ 1, {   540000, HFPLL, 2, 0, 0x28 }, L2(5),   937500 },
-	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(5),   962500 },
-	{ 1, {   648000, HFPLL, 1, 0, 0x18 }, L2(9),   987500 },
-	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(9),  1000000 },
-	{ 1, {   756000, HFPLL, 1, 0, 0x1C }, L2(9),  1025000 },
-	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(9),  1062500 },
-	{ 1, {   864000, HFPLL, 1, 0, 0x20 }, L2(12), 1062500 },
-	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(12), 1087500 },
-	{ 1, {   972000, HFPLL, 1, 0, 0x24 }, L2(12), 1100000 },
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   900000 },
+	{ 1, {   432000, HFPLL, 2, 0, 0x20 }, L2(4),   925000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(4),   925000 },
+	{ 1, {   540000, HFPLL, 2, 0, 0x28 }, L2(4),   937500 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(4),   962500 },
+	{ 1, {   648000, HFPLL, 1, 0, 0x18 }, L2(8),   987500 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(8),  1000000 },
+	{ 1, {   756000, HFPLL, 1, 0, 0x1C }, L2(8),  1025000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(8),  1062500 },
+	{ 1, {   864000, HFPLL, 1, 0, 0x20 }, L2(11), 1062500 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(11), 1087500 },
+	{ 1, {   972000, HFPLL, 1, 0, 0x24 }, L2(11), 1100000 },
 	{ 0, { 0 } }
 };
 
 static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
-	[PVS_SLOW]    = { acpu_freq_tbl, sizeof(acpu_freq_tbl) },
-	[PVS_NOMINAL] = { acpu_freq_tbl, sizeof(acpu_freq_tbl) },
-	[PVS_FAST]    = { acpu_freq_tbl, sizeof(acpu_freq_tbl) },
+	[PVS_SLOW]    = { acpu_freq_tbl, sizeof(acpu_freq_tbl),     0 },
+	[PVS_NOMINAL] = { acpu_freq_tbl, sizeof(acpu_freq_tbl), 25000 },
+	[PVS_FAST]    = { acpu_freq_tbl, sizeof(acpu_freq_tbl), 25000 },
 };
 
 static struct acpuclk_krait_params acpuclk_8627_params __initdata = {
@@ -138,6 +138,7 @@
 	.l2_freq_tbl_size = sizeof(l2_freq_tbl),
 	.bus_scale = &bus_scale_data,
 	.qfprom_phys_base = 0x00700000,
+	.stby_khz = 384000,
 };
 
 static int __init acpuclk_8627_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/acpuclock-8930.c b/arch/arm/mach-msm/acpuclock-8930.c
index d04ce03..5647d14 100644
--- a/arch/arm/mach-msm/acpuclock-8930.c
+++ b/arch/arm/mach-msm/acpuclock-8930.c
@@ -37,10 +37,12 @@
 	.has_droop_ctl = true,
 	.droop_offset = 0x14,
 	.droop_val = 0x0108C000,
-	.low_vdd_l_max = 40,
+	.low_vdd_l_max = 22,
+	.nom_vdd_l_max = 42,
 	.vdd[HFPLL_VDD_NONE] = LVL_NONE,
 	.vdd[HFPLL_VDD_LOW]  = LVL_LOW,
 	.vdd[HFPLL_VDD_NOM]  = LVL_NOM,
+	.vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
 };
 
 static struct scalable scalable[] __initdata = {
@@ -49,7 +51,7 @@
 		.aux_clk_sel_phys = 0x02088014,
 		.aux_clk_sel = 3,
 		.l2cpmr_iaddr = 0x4501,
-		.vreg[VREG_CORE] = { "krait0", 1300000, 1740000 },
+		.vreg[VREG_CORE] = { "krait0", 1300000 },
 		.vreg[VREG_MEM]  = { "krait0_mem", 1150000 },
 		.vreg[VREG_DIG]  = { "krait0_dig", 1150000 },
 		.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
@@ -59,7 +61,7 @@
 		.aux_clk_sel_phys = 0x02098014,
 		.aux_clk_sel = 3,
 		.l2cpmr_iaddr = 0x5501,
-		.vreg[VREG_CORE] = { "krait1", 1300000, 1740000 },
+		.vreg[VREG_CORE] = { "krait1", 1300000 },
 		.vreg[VREG_MEM]  = { "krait1_mem", 1150000 },
 		.vreg[VREG_DIG]  = { "krait1_dig", 1150000 },
 		.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
@@ -93,92 +95,88 @@
 
 /* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
 static struct l2_level l2_freq_tbl[] __initdata = {
-	[0]  = { {STBY_KHZ, QSB,   0, 0, 0x00 },  LVL_NOM, 1050000, 0 },
-	[1]  = { {  384000, PLL_8, 0, 2, 0x00 },  LVL_NOM, 1050000, 1 },
-	[2]  = { {  432000, HFPLL, 2, 0, 0x20 },  LVL_NOM, 1050000, 2 },
-	[3]  = { {  486000, HFPLL, 2, 0, 0x24 },  LVL_NOM, 1050000, 2 },
-	[4]  = { {  540000, HFPLL, 2, 0, 0x28 },  LVL_NOM, 1050000, 2 },
-	[5]  = { {  594000, HFPLL, 1, 0, 0x16 },  LVL_NOM, 1050000, 2 },
-	[6]  = { {  648000, HFPLL, 1, 0, 0x18 },  LVL_NOM, 1050000, 4 },
-	[7]  = { {  702000, HFPLL, 1, 0, 0x1A },  LVL_NOM, 1050000, 4 },
-	[8]  = { {  756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 4 },
-	[9]  = { {  810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 4 },
-	[10] = { {  864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
-	[11] = { {  918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 7 },
-	[12] = { {  972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 7 },
-	[13] = { { 1026000, HFPLL, 1, 0, 0x26 }, LVL_HIGH, 1150000, 7 },
-	[14] = { { 1080000, HFPLL, 1, 0, 0x28 }, LVL_HIGH, 1150000, 7 },
-	[15] = { { 1134000, HFPLL, 1, 0, 0x2A }, LVL_HIGH, 1150000, 7 },
-	[16] = { { 1188000, HFPLL, 1, 0, 0x2C }, LVL_HIGH, 1150000, 7 },
+	[0]  = { {  384000, PLL_8, 0, 2, 0x00 },  LVL_NOM, 1050000, 1 },
+	[1]  = { {  432000, HFPLL, 2, 0, 0x20 },  LVL_NOM, 1050000, 2 },
+	[2]  = { {  486000, HFPLL, 2, 0, 0x24 },  LVL_NOM, 1050000, 2 },
+	[3]  = { {  540000, HFPLL, 2, 0, 0x28 },  LVL_NOM, 1050000, 2 },
+	[4]  = { {  594000, HFPLL, 1, 0, 0x16 },  LVL_NOM, 1050000, 2 },
+	[5]  = { {  648000, HFPLL, 1, 0, 0x18 },  LVL_NOM, 1050000, 4 },
+	[6]  = { {  702000, HFPLL, 1, 0, 0x1A },  LVL_NOM, 1050000, 4 },
+	[7]  = { {  756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 4 },
+	[8]  = { {  810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 4 },
+	[9]  = { {  864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
+	[10] = { {  918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 7 },
+	[11] = { {  972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 7 },
+	[12] = { { 1026000, HFPLL, 1, 0, 0x26 }, LVL_HIGH, 1150000, 7 },
+	[13] = { { 1080000, HFPLL, 1, 0, 0x28 }, LVL_HIGH, 1150000, 7 },
+	[14] = { { 1134000, HFPLL, 1, 0, 0x2A }, LVL_HIGH, 1150000, 7 },
+	[15] = { { 1188000, HFPLL, 1, 0, 0x2C }, LVL_HIGH, 1150000, 7 },
 };
 
 static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
-	{ 0, { STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),   950000 },
-	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(1),   950000 },
-	{ 1, {   432000, HFPLL, 2, 0, 0x20 }, L2(6),   975000 },
-	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(6),   975000 },
-	{ 1, {   540000, HFPLL, 2, 0, 0x28 }, L2(6),  1000000 },
-	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(6),  1000000 },
-	{ 1, {   648000, HFPLL, 1, 0, 0x18 }, L2(6),  1025000 },
-	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(6),  1025000 },
-	{ 1, {   756000, HFPLL, 1, 0, 0x1C }, L2(11), 1075000 },
-	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(11), 1075000 },
-	{ 1, {   864000, HFPLL, 1, 0, 0x20 }, L2(11), 1100000 },
-	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(11), 1100000 },
-	{ 1, {   972000, HFPLL, 1, 0, 0x24 }, L2(11), 1125000 },
-	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1125000 },
-	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1175000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1175000 },
-	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1200000 },
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   950000 },
+	{ 1, {   432000, HFPLL, 2, 0, 0x20 }, L2(5),   975000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(5),   975000 },
+	{ 1, {   540000, HFPLL, 2, 0, 0x28 }, L2(5),  1000000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(5),  1000000 },
+	{ 1, {   648000, HFPLL, 1, 0, 0x18 }, L2(5),  1025000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(5),  1025000 },
+	{ 1, {   756000, HFPLL, 1, 0, 0x1C }, L2(10), 1075000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(10), 1075000 },
+	{ 1, {   864000, HFPLL, 1, 0, 0x20 }, L2(10), 1100000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(10), 1100000 },
+	{ 1, {   972000, HFPLL, 1, 0, 0x24 }, L2(10), 1125000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1125000 },
+	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1175000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1175000 },
+	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1200000 },
 	{ 0, { 0 } }
 };
 
 static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
-	{ 0, { STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),   925000 },
-	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(1),   925000 },
-	{ 1, {   432000, HFPLL, 2, 0, 0x20 }, L2(6),   950000 },
-	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(6),   950000 },
-	{ 1, {   540000, HFPLL, 2, 0, 0x28 }, L2(6),   975000 },
-	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(6),   975000 },
-	{ 1, {   648000, HFPLL, 1, 0, 0x18 }, L2(6),  1000000 },
-	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(6),  1000000 },
-	{ 1, {   756000, HFPLL, 1, 0, 0x1C }, L2(11), 1050000 },
-	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(11), 1050000 },
-	{ 1, {   864000, HFPLL, 1, 0, 0x20 }, L2(11), 1075000 },
-	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(11), 1075000 },
-	{ 1, {   972000, HFPLL, 1, 0, 0x24 }, L2(11), 1100000 },
-	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1100000 },
-	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1150000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1150000 },
-	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1175000 },
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   925000 },
+	{ 1, {   432000, HFPLL, 2, 0, 0x20 }, L2(5),   950000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(5),   950000 },
+	{ 1, {   540000, HFPLL, 2, 0, 0x28 }, L2(5),   975000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(5),   975000 },
+	{ 1, {   648000, HFPLL, 1, 0, 0x18 }, L2(5),  1000000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(5),  1000000 },
+	{ 1, {   756000, HFPLL, 1, 0, 0x1C }, L2(10), 1050000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(10), 1050000 },
+	{ 1, {   864000, HFPLL, 1, 0, 0x20 }, L2(10), 1075000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(10), 1075000 },
+	{ 1, {   972000, HFPLL, 1, 0, 0x24 }, L2(10), 1100000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1100000 },
+	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1150000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1150000 },
+	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1175000 },
 	{ 0, { 0 } }
 };
 
 static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
-	{ 0, { STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),   900000 },
-	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(1),   900000 },
-	{ 1, {   432000, HFPLL, 2, 0, 0x20 }, L2(6),   900000 },
-	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(6),   900000 },
-	{ 1, {   540000, HFPLL, 2, 0, 0x28 }, L2(6),   925000 },
-	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(6),   925000 },
-	{ 1, {   648000, HFPLL, 1, 0, 0x18 }, L2(6),   950000 },
-	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(6),   950000 },
-	{ 1, {   756000, HFPLL, 1, 0, 0x1C }, L2(11), 1000000 },
-	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(11), 1000000 },
-	{ 1, {   864000, HFPLL, 1, 0, 0x20 }, L2(11), 1025000 },
-	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(11), 1025000 },
-	{ 1, {   972000, HFPLL, 1, 0, 0x24 }, L2(11), 1050000 },
-	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(11), 1050000 },
-	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(16), 1100000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(16), 1100000 },
-	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(16), 1125000 },
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   900000 },
+	{ 1, {   432000, HFPLL, 2, 0, 0x20 }, L2(5),   900000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(5),   900000 },
+	{ 1, {   540000, HFPLL, 2, 0, 0x28 }, L2(5),   925000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(5),   925000 },
+	{ 1, {   648000, HFPLL, 1, 0, 0x18 }, L2(5),   950000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(5),   950000 },
+	{ 1, {   756000, HFPLL, 1, 0, 0x1C }, L2(10), 1000000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(10), 1000000 },
+	{ 1, {   864000, HFPLL, 1, 0, 0x20 }, L2(10), 1025000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(10), 1025000 },
+	{ 1, {   972000, HFPLL, 1, 0, 0x24 }, L2(10), 1050000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1050000 },
+	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1100000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1100000 },
+	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1125000 },
 	{ 0, { 0 } }
 };
 
 static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
-	[PVS_SLOW]    = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow) },
-	[PVS_NOMINAL] = { acpu_freq_tbl_nom,  sizeof(acpu_freq_tbl_nom)  },
-	[PVS_FAST]    = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast) },
+[PVS_SLOW]    = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow),     0 },
+[PVS_NOMINAL] = { acpu_freq_tbl_nom,  sizeof(acpu_freq_tbl_nom),  25000 },
+[PVS_FAST]    = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
 };
 
 static struct acpuclk_krait_params acpuclk_8930_params __initdata = {
@@ -190,6 +188,7 @@
 	.l2_freq_tbl_size = sizeof(l2_freq_tbl),
 	.bus_scale = &bus_scale_data,
 	.qfprom_phys_base = 0x00700000,
+	.stby_khz = 384000,
 };
 
 static int __init acpuclk_8930_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/acpuclock-8930aa.c b/arch/arm/mach-msm/acpuclock-8930aa.c
new file mode 100644
index 0000000..34ba1da
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8930aa.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <mach/rpm-regulator.h>
+#include <mach/msm_bus_board.h>
+#include <mach/msm_bus.h>
+
+#include "acpuclock.h"
+#include "acpuclock-krait.h"
+
+/* Corner type vreg VDD values */
+#define LVL_NONE	RPM_VREG_CORNER_NONE
+#define LVL_LOW		RPM_VREG_CORNER_LOW
+#define LVL_NOM		RPM_VREG_CORNER_NOMINAL
+#define LVL_HIGH	RPM_VREG_CORNER_HIGH
+
+static struct hfpll_data hfpll_data __initdata = {
+	.mode_offset = 0x00,
+	.l_offset = 0x08,
+	.m_offset = 0x0C,
+	.n_offset = 0x10,
+	.config_offset = 0x04,
+	.config_val = 0x7845C665,
+	.has_droop_ctl = true,
+	.droop_offset = 0x14,
+	.droop_val = 0x0108C000,
+	.low_vdd_l_max = 22,
+	.nom_vdd_l_max = 42,
+	.vdd[HFPLL_VDD_NONE] = LVL_NONE,
+	.vdd[HFPLL_VDD_LOW]  = LVL_LOW,
+	.vdd[HFPLL_VDD_NOM]  = LVL_NOM,
+	.vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
+};
+
+static struct scalable scalable[] __initdata = {
+	[CPU0] = {
+		.hfpll_phys_base = 0x00903200,
+		.aux_clk_sel_phys = 0x02088014,
+		.aux_clk_sel = 3,
+		.l2cpmr_iaddr = 0x4501,
+		.vreg[VREG_CORE] = { "krait0", 1300000 },
+		.vreg[VREG_MEM]  = { "krait0_mem", 1150000 },
+		.vreg[VREG_DIG]  = { "krait0_dig", 1150000 },
+		.vreg[VREG_HFPLL_A] = { "krait0_hfpll", 1800000 },
+	},
+	[CPU1] = {
+		.hfpll_phys_base = 0x00903300,
+		.aux_clk_sel_phys = 0x02098014,
+		.aux_clk_sel = 3,
+		.l2cpmr_iaddr = 0x5501,
+		.vreg[VREG_CORE] = { "krait1", 1300000 },
+		.vreg[VREG_MEM]  = { "krait1_mem", 1150000 },
+		.vreg[VREG_DIG]  = { "krait1_dig", 1150000 },
+		.vreg[VREG_HFPLL_A] = { "krait1_hfpll", 1800000 },
+	},
+	[L2] = {
+		.hfpll_phys_base = 0x00903400,
+		.aux_clk_sel_phys = 0x02011028,
+		.aux_clk_sel = 3,
+		.l2cpmr_iaddr = 0x0500,
+		.vreg[VREG_HFPLL_A] = { "l2_hfpll", 1800000 },
+	},
+};
+
+static struct msm_bus_paths bw_level_tbl[] __initdata = {
+	[0] =  BW_MBPS(640), /* At least  80 MHz on bus. */
+	[1] = BW_MBPS(1064), /* At least 133 MHz on bus. */
+	[2] = BW_MBPS(1600), /* At least 200 MHz on bus. */
+	[3] = BW_MBPS(2128), /* At least 266 MHz on bus. */
+	[4] = BW_MBPS(3200), /* At least 400 MHz on bus. */
+	[5] = BW_MBPS(3600), /* At least 450 MHz on bus. */
+	[6] = BW_MBPS(3936), /* At least 492 MHz on bus. */
+	[7] = BW_MBPS(4264), /* At least 533 MHz on bus. */
+};
+
+static struct msm_bus_scale_pdata bus_scale_data __initdata = {
+	.usecase = bw_level_tbl,
+	.num_usecases = ARRAY_SIZE(bw_level_tbl),
+	.active_only = 1,
+	.name = "acpuclk-8930aa",
+};
+
+/* TODO: Update vdd_dig, vdd_mem and bw when data is available. */
+static struct l2_level l2_freq_tbl[] __initdata = {
+	[0]  = { {  384000, PLL_8, 0, 2, 0x00 },  LVL_NOM, 1050000, 1 },
+	[1]  = { {  432000, HFPLL, 2, 0, 0x20 },  LVL_NOM, 1050000, 2 },
+	[2]  = { {  486000, HFPLL, 2, 0, 0x24 },  LVL_NOM, 1050000, 2 },
+	[3]  = { {  540000, HFPLL, 2, 0, 0x28 },  LVL_NOM, 1050000, 2 },
+	[4]  = { {  594000, HFPLL, 1, 0, 0x16 },  LVL_NOM, 1050000, 2 },
+	[5]  = { {  648000, HFPLL, 1, 0, 0x18 },  LVL_NOM, 1050000, 4 },
+	[6]  = { {  702000, HFPLL, 1, 0, 0x1A },  LVL_NOM, 1050000, 4 },
+	[7]  = { {  756000, HFPLL, 1, 0, 0x1C }, LVL_HIGH, 1150000, 4 },
+	[8]  = { {  810000, HFPLL, 1, 0, 0x1E }, LVL_HIGH, 1150000, 4 },
+	[9]  = { {  864000, HFPLL, 1, 0, 0x20 }, LVL_HIGH, 1150000, 4 },
+	[10] = { {  918000, HFPLL, 1, 0, 0x22 }, LVL_HIGH, 1150000, 7 },
+	[11] = { {  972000, HFPLL, 1, 0, 0x24 }, LVL_HIGH, 1150000, 7 },
+	[12] = { { 1026000, HFPLL, 1, 0, 0x26 }, LVL_HIGH, 1150000, 7 },
+	[13] = { { 1080000, HFPLL, 1, 0, 0x28 }, LVL_HIGH, 1150000, 7 },
+	[14] = { { 1134000, HFPLL, 1, 0, 0x2A }, LVL_HIGH, 1150000, 7 },
+	[15] = { { 1188000, HFPLL, 1, 0, 0x2C }, LVL_HIGH, 1150000, 7 },
+};
+
+static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   950000 },
+	{ 1, {   432000, HFPLL, 2, 0, 0x20 }, L2(5),   975000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(5),   975000 },
+	{ 1, {   540000, HFPLL, 2, 0, 0x28 }, L2(5),  1000000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(5),  1000000 },
+	{ 1, {   648000, HFPLL, 1, 0, 0x18 }, L2(5),  1025000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(5),  1025000 },
+	{ 1, {   756000, HFPLL, 1, 0, 0x1C }, L2(10), 1075000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(10), 1075000 },
+	{ 1, {   864000, HFPLL, 1, 0, 0x20 }, L2(10), 1100000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(10), 1100000 },
+	{ 1, {   972000, HFPLL, 1, 0, 0x24 }, L2(10), 1125000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1125000 },
+	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1175000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1175000 },
+	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1200000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1200000 },
+	{ 1, {  1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1225000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1225000 },
+	{ 1, {  1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1237500 },
+	{ 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   925000 },
+	{ 1, {   432000, HFPLL, 2, 0, 0x20 }, L2(5),   950000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(5),   950000 },
+	{ 1, {   540000, HFPLL, 2, 0, 0x28 }, L2(5),   975000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(5),   975000 },
+	{ 1, {   648000, HFPLL, 1, 0, 0x18 }, L2(5),  1000000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(5),  1000000 },
+	{ 1, {   756000, HFPLL, 1, 0, 0x1C }, L2(10), 1050000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(10), 1050000 },
+	{ 1, {   864000, HFPLL, 1, 0, 0x20 }, L2(10), 1075000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(10), 1075000 },
+	{ 1, {   972000, HFPLL, 1, 0, 0x24 }, L2(10), 1100000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1100000 },
+	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1150000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1150000 },
+	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1175000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1175000 },
+	{ 1, {  1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1200000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1200000 },
+	{ 1, {  1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1212500 },
+	{ 0, { 0 } }
+};
+
+static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   900000 },
+	{ 1, {   432000, HFPLL, 2, 0, 0x20 }, L2(5),   900000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(5),   900000 },
+	{ 1, {   540000, HFPLL, 2, 0, 0x28 }, L2(5),   925000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(5),   925000 },
+	{ 1, {   648000, HFPLL, 1, 0, 0x18 }, L2(5),   950000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(5),   950000 },
+	{ 1, {   756000, HFPLL, 1, 0, 0x1C }, L2(10), 1000000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(10), 1000000 },
+	{ 1, {   864000, HFPLL, 1, 0, 0x20 }, L2(10), 1025000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(10), 1025000 },
+	{ 1, {   972000, HFPLL, 1, 0, 0x24 }, L2(10), 1050000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(10), 1050000 },
+	{ 1, {  1080000, HFPLL, 1, 0, 0x28 }, L2(15), 1100000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(15), 1100000 },
+	{ 1, {  1188000, HFPLL, 1, 0, 0x2C }, L2(15), 1125000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(15), 1125000 },
+	{ 1, {  1296000, HFPLL, 1, 0, 0x30 }, L2(15), 1150000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(15), 1150000 },
+	{ 1, {  1404000, HFPLL, 1, 0, 0x34 }, L2(15), 1162500 },
+	{ 0, { 0 } }
+};
+
+static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
+[PVS_SLOW]    = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow),     0 },
+[PVS_NOMINAL] = { acpu_freq_tbl_nom,  sizeof(acpu_freq_tbl_nom),  25000 },
+[PVS_FAST]    = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
+};
+
+static struct acpuclk_krait_params acpuclk_8930aa_params __initdata = {
+	.scalable = scalable,
+	.scalable_size = sizeof(scalable),
+	.hfpll_data = &hfpll_data,
+	.pvs_tables = pvs_tables,
+	.l2_freq_tbl = l2_freq_tbl,
+	.l2_freq_tbl_size = sizeof(l2_freq_tbl),
+	.bus_scale = &bus_scale_data,
+	.qfprom_phys_base = 0x00700000,
+	.stby_khz = 384000,
+};
+
+static int __init acpuclk_8930aa_probe(struct platform_device *pdev)
+{
+	return acpuclk_krait_init(&pdev->dev, &acpuclk_8930aa_params);
+}
+
+static struct platform_driver acpuclk_8930aa_driver = {
+	.driver = {
+		.name = "acpuclk-8930aa",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init acpuclk_8930aa_init(void)
+{
+	return platform_driver_probe(&acpuclk_8930aa_driver,
+				     acpuclk_8930aa_probe);
+}
+device_initcall(acpuclk_8930aa_init);
diff --git a/arch/arm/mach-msm/acpuclock-8960.c b/arch/arm/mach-msm/acpuclock-8960.c
index 8623c2b..8cc4b13 100644
--- a/arch/arm/mach-msm/acpuclock-8960.c
+++ b/arch/arm/mach-msm/acpuclock-8960.c
@@ -31,10 +31,12 @@
 	.has_droop_ctl = true,
 	.droop_offset = 0x14,
 	.droop_val = 0x0108C000,
-	.low_vdd_l_max = 40,
-	.vdd[HFPLL_VDD_NONE] = 0,
-	.vdd[HFPLL_VDD_LOW]  = 850000,
+	.low_vdd_l_max = 22,
+	.nom_vdd_l_max = 42,
+	.vdd[HFPLL_VDD_NONE] =       0,
+	.vdd[HFPLL_VDD_LOW]  =  945000,
 	.vdd[HFPLL_VDD_NOM]  = 1050000,
+	.vdd[HFPLL_VDD_HIGH] = 1150000,
 };
 
 static struct scalable scalable[] __initdata = {
@@ -43,7 +45,7 @@
 		.aux_clk_sel_phys = 0x02088014,
 		.aux_clk_sel = 3,
 		.l2cpmr_iaddr = 0x4501,
-		.vreg[VREG_CORE] = { "krait0", 1300000, 3200000 },
+		.vreg[VREG_CORE] = { "krait0", 1300000 },
 		.vreg[VREG_MEM]  = { "krait0_mem", 1150000 },
 		.vreg[VREG_DIG]  = { "krait0_dig", 1150000 },
 		.vreg[VREG_HFPLL_A] = { "krait0_s8", 2050000 },
@@ -54,7 +56,7 @@
 		.aux_clk_sel_phys = 0x02098014,
 		.aux_clk_sel = 3,
 		.l2cpmr_iaddr = 0x5501,
-		.vreg[VREG_CORE] = { "krait1", 1300000, 3200000 },
+		.vreg[VREG_CORE] = { "krait1", 1300000 },
 		.vreg[VREG_MEM]  = { "krait1_mem", 1150000 },
 		.vreg[VREG_DIG]  = { "krait1_dig", 1150000 },
 		.vreg[VREG_HFPLL_A] = { "krait1_s8", 2050000 },
@@ -88,113 +90,109 @@
 };
 
 static struct l2_level l2_freq_tbl[] __initdata = {
-	[0]  = { {STBY_KHZ, QSB,   0, 0, 0x00 }, 1050000, 1050000, 0 },
-	[1]  = { {  384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
-	[2]  = { {  432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
-	[3]  = { {  486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
-	[4]  = { {  540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
-	[5]  = { {  594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
-	[6]  = { {  648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
-	[7]  = { {  702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
-	[8]  = { {  756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
-	[9]  = { {  810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
-	[10] = { {  864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
-	[11] = { {  918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 6 },
-	[12] = { {  972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 6 },
-	[13] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 6 },
-	[14] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 6 },
-	[15] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 6 },
-	[16] = { { 1188000, HFPLL, 1, 0, 0x2C }, 1150000, 1150000, 6 },
-	[17] = { { 1242000, HFPLL, 1, 0, 0x2E }, 1150000, 1150000, 6 },
-	[18] = { { 1296000, HFPLL, 1, 0, 0x30 }, 1150000, 1150000, 6 },
-	[19] = { { 1350000, HFPLL, 1, 0, 0x32 }, 1150000, 1150000, 6 },
+	[0]  = { {  384000, PLL_8, 0, 2, 0x00 }, 1050000, 1050000, 1 },
+	[1]  = { {  432000, HFPLL, 2, 0, 0x20 }, 1050000, 1050000, 2 },
+	[2]  = { {  486000, HFPLL, 2, 0, 0x24 }, 1050000, 1050000, 2 },
+	[3]  = { {  540000, HFPLL, 2, 0, 0x28 }, 1050000, 1050000, 2 },
+	[4]  = { {  594000, HFPLL, 1, 0, 0x16 }, 1050000, 1050000, 2 },
+	[5]  = { {  648000, HFPLL, 1, 0, 0x18 }, 1050000, 1050000, 4 },
+	[6]  = { {  702000, HFPLL, 1, 0, 0x1A }, 1050000, 1050000, 4 },
+	[7]  = { {  756000, HFPLL, 1, 0, 0x1C }, 1150000, 1150000, 4 },
+	[8]  = { {  810000, HFPLL, 1, 0, 0x1E }, 1150000, 1150000, 4 },
+	[9]  = { {  864000, HFPLL, 1, 0, 0x20 }, 1150000, 1150000, 4 },
+	[10] = { {  918000, HFPLL, 1, 0, 0x22 }, 1150000, 1150000, 6 },
+	[11] = { {  972000, HFPLL, 1, 0, 0x24 }, 1150000, 1150000, 6 },
+	[12] = { { 1026000, HFPLL, 1, 0, 0x26 }, 1150000, 1150000, 6 },
+	[13] = { { 1080000, HFPLL, 1, 0, 0x28 }, 1150000, 1150000, 6 },
+	[14] = { { 1134000, HFPLL, 1, 0, 0x2A }, 1150000, 1150000, 6 },
+	[15] = { { 1188000, HFPLL, 1, 0, 0x2C }, 1150000, 1150000, 6 },
+	[16] = { { 1242000, HFPLL, 1, 0, 0x2E }, 1150000, 1150000, 6 },
+	[17] = { { 1296000, HFPLL, 1, 0, 0x30 }, 1150000, 1150000, 6 },
+	[18] = { { 1350000, HFPLL, 1, 0, 0x32 }, 1150000, 1150000, 6 },
 };
 
 static struct acpu_level acpu_freq_tbl_slow[] __initdata = {
-	{ 0, { STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),   950000 },
-	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(1),   950000 },
-	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(7),   975000 },
-	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(7),   975000 },
-	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(7),  1000000 },
-	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(7),  1000000 },
-	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(7),  1025000 },
-	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(7),  1025000 },
-	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(7),  1075000 },
-	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(7),  1075000 },
-	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(7),  1100000 },
-	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(7),  1100000 },
-	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(7),  1125000 },
-	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(7),  1125000 },
-	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1175000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1175000 },
-	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1200000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1200000 },
-	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1225000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1225000 },
-	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1237500 },
-	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1237500 },
-	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1250000 },
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   950000 },
+	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(6),   975000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(6),   975000 },
+	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(6),  1000000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(6),  1000000 },
+	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(6),  1025000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(6),  1025000 },
+	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(6),  1075000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(6),  1075000 },
+	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(6),  1100000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(6),  1100000 },
+	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(6),  1125000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(6),  1125000 },
+	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(18), 1175000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(18), 1175000 },
+	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(18), 1200000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(18), 1200000 },
+	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(18), 1225000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(18), 1225000 },
+	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(18), 1237500 },
+	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(18), 1237500 },
+	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(18), 1250000 },
 	{ 0, { 0 } }
 };
 
 static struct acpu_level acpu_freq_tbl_nom[] __initdata = {
-	{ 0, { STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),   900000 },
-	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(1),   900000 },
-	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(7),   925000 },
-	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(7),   925000 },
-	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(7),   950000 },
-	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(7),   950000 },
-	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(7),   975000 },
-	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(7),   975000 },
-	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(7),  1025000 },
-	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(7),  1025000 },
-	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(7),  1050000 },
-	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(7),  1050000 },
-	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(7),  1075000 },
-	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(7),  1075000 },
-	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1125000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1125000 },
-	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1150000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1150000 },
-	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1175000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1175000 },
-	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1187500 },
-	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1187500 },
-	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1200000 },
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   900000 },
+	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(6),   925000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(6),   925000 },
+	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(6),   950000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(6),   950000 },
+	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(6),   975000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(6),   975000 },
+	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(6),  1025000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(6),  1025000 },
+	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(6),  1050000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(6),  1050000 },
+	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(6),  1075000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(6),  1075000 },
+	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(18), 1125000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(18), 1125000 },
+	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(18), 1150000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(18), 1150000 },
+	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(18), 1175000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(18), 1175000 },
+	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(18), 1187500 },
+	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(18), 1187500 },
+	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(18), 1200000 },
 	{ 0, { 0 } }
 };
 
 static struct acpu_level acpu_freq_tbl_fast[] __initdata = {
-	{ 0, { STBY_KHZ, QSB,   0, 0, 0x00 }, L2(0),   850000 },
-	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(1),   850000 },
-	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(7),   875000 },
-	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(7),   875000 },
-	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(7),   900000 },
-	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(7),   900000 },
-	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(7),   925000 },
-	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(7),   925000 },
-	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(7),   975000 },
-	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(7),   975000 },
-	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(7),  1000000 },
-	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(7),  1000000 },
-	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(7),  1025000 },
-	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(7),  1025000 },
-	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(19), 1075000 },
-	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(19), 1075000 },
-	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(19), 1100000 },
-	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(19), 1100000 },
-	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(19), 1125000 },
-	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(19), 1125000 },
-	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(19), 1137500 },
-	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(19), 1137500 },
-	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(19), 1150000 },
+	{ 1, {   384000, PLL_8, 0, 2, 0x00 }, L2(0),   850000 },
+	{ 0, {   432000, HFPLL, 2, 0, 0x20 }, L2(6),   875000 },
+	{ 1, {   486000, HFPLL, 2, 0, 0x24 }, L2(6),   875000 },
+	{ 0, {   540000, HFPLL, 2, 0, 0x28 }, L2(6),   900000 },
+	{ 1, {   594000, HFPLL, 1, 0, 0x16 }, L2(6),   900000 },
+	{ 0, {   648000, HFPLL, 1, 0, 0x18 }, L2(6),   925000 },
+	{ 1, {   702000, HFPLL, 1, 0, 0x1A }, L2(6),   925000 },
+	{ 0, {   756000, HFPLL, 1, 0, 0x1C }, L2(6),   975000 },
+	{ 1, {   810000, HFPLL, 1, 0, 0x1E }, L2(6),   975000 },
+	{ 0, {   864000, HFPLL, 1, 0, 0x20 }, L2(6),  1000000 },
+	{ 1, {   918000, HFPLL, 1, 0, 0x22 }, L2(6),  1000000 },
+	{ 0, {   972000, HFPLL, 1, 0, 0x24 }, L2(6),  1025000 },
+	{ 1, {  1026000, HFPLL, 1, 0, 0x26 }, L2(6),  1025000 },
+	{ 0, {  1080000, HFPLL, 1, 0, 0x28 }, L2(18), 1075000 },
+	{ 1, {  1134000, HFPLL, 1, 0, 0x2A }, L2(18), 1075000 },
+	{ 0, {  1188000, HFPLL, 1, 0, 0x2C }, L2(18), 1100000 },
+	{ 1, {  1242000, HFPLL, 1, 0, 0x2E }, L2(18), 1100000 },
+	{ 0, {  1296000, HFPLL, 1, 0, 0x30 }, L2(18), 1125000 },
+	{ 1, {  1350000, HFPLL, 1, 0, 0x32 }, L2(18), 1125000 },
+	{ 0, {  1404000, HFPLL, 1, 0, 0x34 }, L2(18), 1137500 },
+	{ 1, {  1458000, HFPLL, 1, 0, 0x36 }, L2(18), 1137500 },
+	{ 1, {  1512000, HFPLL, 1, 0, 0x38 }, L2(18), 1150000 },
 	{ 0, { 0 } }
 };
 
 static struct pvs_table pvs_tables[NUM_PVS] __initdata = {
-	[PVS_SLOW]    = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow) },
-	[PVS_NOMINAL] = { acpu_freq_tbl_nom,  sizeof(acpu_freq_tbl_nom)  },
-	[PVS_FAST]    = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast) },
+[PVS_SLOW]    = { acpu_freq_tbl_slow, sizeof(acpu_freq_tbl_slow),     0 },
+[PVS_NOMINAL] = { acpu_freq_tbl_nom,  sizeof(acpu_freq_tbl_nom),  25000 },
+[PVS_FAST]    = { acpu_freq_tbl_fast, sizeof(acpu_freq_tbl_fast), 25000 },
 };
 
 static struct acpuclk_krait_params acpuclk_8960_params __initdata = {
@@ -206,6 +204,7 @@
 	.l2_freq_tbl_size = sizeof(l2_freq_tbl),
 	.bus_scale = &bus_scale_data,
 	.qfprom_phys_base = 0x00700000,
+	.stby_khz = 384000,
 };
 
 static int __init acpuclk_8960_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/acpuclock-8974.c b/arch/arm/mach-msm/acpuclock-8974.c
index 9ed038b..8c89014 100644
--- a/arch/arm/mach-msm/acpuclock-8974.c
+++ b/arch/arm/mach-msm/acpuclock-8974.c
@@ -33,20 +33,26 @@
 	.l_offset = 0x04,
 	.m_offset = 0x08,
 	.n_offset = 0x0C,
+	.has_user_reg = true,
+	.user_offset = 0x10,
 	.config_offset = 0x14,
-	/* TODO: Verify magic number for 8974 when available. */
-	.config_val = 0x7845C665,
+	/* TODO: Verify magic numbers when final values are available. */
+	.user_val = 0x8,
+	.config_val = 0x04D0405D,
+	.low_vco_l_max = 65,
 	.low_vdd_l_max = 52,
+	.nom_vdd_l_max = 104,
 	.vdd[HFPLL_VDD_NONE] = LVL_NONE,
 	.vdd[HFPLL_VDD_LOW]  = LVL_LOW,
 	.vdd[HFPLL_VDD_NOM]  = LVL_NOM,
+	.vdd[HFPLL_VDD_HIGH] = LVL_HIGH,
 };
 
 static struct scalable scalable[] __initdata = {
 	[CPU0] = {
 		.hfpll_phys_base = 0xF908A000,
 		.l2cpmr_iaddr = 0x4501,
-		.vreg[VREG_CORE] = { "krait0",     1050000, 3200000 },
+		.vreg[VREG_CORE] = { "krait0",     1050000 },
 		.vreg[VREG_MEM]  = { "krait0_mem", 1050000 },
 		.vreg[VREG_DIG]  = { "krait0_dig", LVL_HIGH },
 		.vreg[VREG_HFPLL_A] = { "krait0_hfpll_a", 2150000 },
@@ -55,7 +61,7 @@
 	[CPU1] = {
 		.hfpll_phys_base = 0xF909A000,
 		.l2cpmr_iaddr = 0x5501,
-		.vreg[VREG_CORE] = { "krait1",     1050000, 3200000 },
+		.vreg[VREG_CORE] = { "krait1",     1050000 },
 		.vreg[VREG_MEM]  = { "krait1_mem", 1050000 },
 		.vreg[VREG_DIG]  = { "krait1_dig", LVL_HIGH },
 		.vreg[VREG_HFPLL_A] = { "krait1_hfpll_a", 2150000 },
@@ -64,7 +70,7 @@
 	[CPU2] = {
 		.hfpll_phys_base = 0xF90AA000,
 		.l2cpmr_iaddr = 0x6501,
-		.vreg[VREG_CORE] = { "krait2",     1050000, 3200000 },
+		.vreg[VREG_CORE] = { "krait2",     1050000 },
 		.vreg[VREG_MEM]  = { "krait2_mem", 1050000 },
 		.vreg[VREG_DIG]  = { "krait2_dig", LVL_HIGH },
 		.vreg[VREG_HFPLL_A] = { "krait2_hfpll_a", 2150000 },
@@ -73,7 +79,7 @@
 	[CPU3] = {
 		.hfpll_phys_base = 0xF90BA000,
 		.l2cpmr_iaddr = 0x7501,
-		.vreg[VREG_CORE] = { "krait3",     1050000, 3200000 },
+		.vreg[VREG_CORE] = { "krait3",     1050000 },
 		.vreg[VREG_MEM]  = { "krait3_mem", 1050000 },
 		.vreg[VREG_DIG]  = { "krait3_dig", LVL_HIGH },
 		.vreg[VREG_HFPLL_A] = { "krait3_hfpll_a", 2150000 },
@@ -103,33 +109,31 @@
 };
 
 static struct l2_level l2_freq_tbl[] __initdata = {
-	[0]  = { {STBY_KHZ, QSB,   0, 0,   0 }, LVL_LOW, 1050000, 0 },
-	[1]  = { {  300000, PLL_0, 0, 2,   0 }, LVL_LOW, 1050000, 2 },
-	[2]  = { {  384000, HFPLL, 2, 0,  40 }, LVL_NOM, 1050000, 2 },
-	[3]  = { {  460800, HFPLL, 2, 0,  48 }, LVL_NOM, 1050000, 2 },
-	[4]  = { {  537600, HFPLL, 1, 0,  28 }, LVL_NOM, 1050000, 2 },
-	[5]  = { {  576000, HFPLL, 1, 0,  30 }, LVL_NOM, 1050000, 3 },
-	[6]  = { {  652800, HFPLL, 1, 0,  34 }, LVL_NOM, 1050000, 3 },
-	[7]  = { {  729600, HFPLL, 1, 0,  38 }, LVL_NOM, 1050000, 3 },
-	[8]  = { {  806400, HFPLL, 1, 0,  42 }, LVL_NOM, 1050000, 3 },
-	[9]  = { {  883200, HFPLL, 1, 0,  46 }, LVL_NOM, 1050000, 4 },
-	[10] = { {  960000, HFPLL, 1, 0,  50 }, LVL_NOM, 1050000, 4 },
-	[11] = { { 1036800, HFPLL, 1, 0,  54 }, LVL_NOM, 1050000, 4 },
+	[0]  = { {  300000, PLL_0, 0, 2,   0 }, LVL_LOW, 1050000, 2 },
+	[1]  = { {  384000, HFPLL, 2, 0,  40 }, LVL_NOM, 1050000, 2 },
+	[2]  = { {  460800, HFPLL, 2, 0,  48 }, LVL_NOM, 1050000, 2 },
+	[3]  = { {  537600, HFPLL, 1, 0,  28 }, LVL_NOM, 1050000, 2 },
+	[4]  = { {  576000, HFPLL, 1, 0,  30 }, LVL_NOM, 1050000, 3 },
+	[5]  = { {  652800, HFPLL, 1, 0,  34 }, LVL_NOM, 1050000, 3 },
+	[6]  = { {  729600, HFPLL, 1, 0,  38 }, LVL_NOM, 1050000, 3 },
+	[7]  = { {  806400, HFPLL, 1, 0,  42 }, LVL_NOM, 1050000, 3 },
+	[8]  = { {  883200, HFPLL, 1, 0,  46 }, LVL_NOM, 1050000, 4 },
+	[9]  = { {  960000, HFPLL, 1, 0,  50 }, LVL_NOM, 1050000, 4 },
+	[10] = { { 1036800, HFPLL, 1, 0,  54 }, LVL_NOM, 1050000, 4 },
 };
 
 static struct acpu_level acpu_freq_tbl[] __initdata = {
-	{ 0, {STBY_KHZ, QSB,   0, 0,   0 }, L2(0),  1050000 },
-	{ 1, {  300000, PLL_0, 0, 2,   0 }, L2(1),  1050000 },
-	{ 1, {  384000, HFPLL, 2, 0,  40 }, L2(2),  1050000 },
-	{ 1, {  460800, HFPLL, 2, 0,  48 }, L2(3),  1050000 },
-	{ 1, {  537600, HFPLL, 1, 0,  28 }, L2(4),  1050000 },
-	{ 1, {  576000, HFPLL, 1, 0,  30 }, L2(5),  1050000 },
-	{ 1, {  652800, HFPLL, 1, 0,  34 }, L2(6),  1050000 },
-	{ 1, {  729600, HFPLL, 1, 0,  38 }, L2(7),  1050000 },
-	{ 1, {  806400, HFPLL, 1, 0,  42 }, L2(8),  1050000 },
-	{ 1, {  883200, HFPLL, 1, 0,  46 }, L2(9),  1050000 },
-	{ 1, {  960000, HFPLL, 1, 0,  50 }, L2(10), 1050000 },
-	{ 1, { 1036800, HFPLL, 1, 0,  54 }, L2(11), 1050000 },
+	{ 1, {  300000, PLL_0, 0, 2,   0 }, L2(0),  1050000, 3200000 },
+	{ 1, {  384000, HFPLL, 2, 0,  40 }, L2(1),  1050000, 3200000 },
+	{ 1, {  460800, HFPLL, 2, 0,  48 }, L2(2),  1050000, 3200000 },
+	{ 1, {  537600, HFPLL, 1, 0,  28 }, L2(3),  1050000, 3200000 },
+	{ 1, {  576000, HFPLL, 1, 0,  30 }, L2(4),  1050000, 3200000 },
+	{ 1, {  652800, HFPLL, 1, 0,  34 }, L2(5),  1050000, 3200000 },
+	{ 1, {  729600, HFPLL, 1, 0,  38 }, L2(6),  1050000, 3200000 },
+	{ 1, {  806400, HFPLL, 1, 0,  42 }, L2(7),  1050000, 3200000 },
+	{ 1, {  883200, HFPLL, 1, 0,  46 }, L2(8),  1050000, 3200000 },
+	{ 1, {  960000, HFPLL, 1, 0,  50 }, L2(9),  1050000, 3200000 },
+	{ 1, { 1036800, HFPLL, 1, 0,  54 }, L2(10), 1050000, 3200000 },
 	{ 0, { 0 } }
 };
 
@@ -148,6 +152,7 @@
 	.l2_freq_tbl_size = sizeof(l2_freq_tbl),
 	.bus_scale = &bus_scale_data,
 	.qfprom_phys_base = 0xFC4A8000,
+	.stby_khz = 300000,
 };
 
 static int __init acpuclk_8974_probe(struct platform_device *pdev)
diff --git a/arch/arm/mach-msm/acpuclock-krait.c b/arch/arm/mach-msm/acpuclock-krait.c
index fd43f57..33396e5 100644
--- a/arch/arm/mach-msm/acpuclock-krait.c
+++ b/arch/arm/mach-msm/acpuclock-krait.c
@@ -11,9 +11,8 @@
  * GNU General Public License for more details.
  */
 
-#define pr_fmt(fmt) "%s: " fmt, __func__
-
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/delay.h>
@@ -42,7 +41,6 @@
 #define PRI_SRC_SEL_SEC_SRC	0
 #define PRI_SRC_SEL_HFPLL	1
 #define PRI_SRC_SEL_HFPLL_DIV2	2
-#define SEC_SRC_SEL_QSB		0
 #define SEC_SRC_SEL_L2PLL	1
 #define SEC_SRC_SEL_AUX		2
 
@@ -54,12 +52,12 @@
 
 static struct drv_data {
 	struct acpu_level *acpu_freq_tbl;
-	const struct acpu_level *max_acpu_lvl;
 	const struct l2_level *l2_freq_tbl;
 	struct scalable *scalable;
 	struct hfpll_data *hfpll_data;
 	u32 bus_perf_client;
 	struct msm_bus_scale_pdata *bus_scale;
+	int boost_uv;
 	struct device *dev;
 } drv;
 
@@ -171,8 +169,19 @@
 /* Program the HFPLL rate. Assumes HFPLL is already disabled. */
 static void hfpll_set_rate(struct scalable *sc, const struct core_speed *tgt_s)
 {
-	writel_relaxed(tgt_s->pll_l_val,
-		sc->hfpll_base + drv.hfpll_data->l_offset);
+	void __iomem *base = sc->hfpll_base;
+	u32 regval;
+
+	writel_relaxed(tgt_s->pll_l_val, base + drv.hfpll_data->l_offset);
+
+	if (drv.hfpll_data->has_user_reg) {
+		regval = readl_relaxed(base + drv.hfpll_data->user_offset);
+		if (tgt_s->pll_l_val <= drv.hfpll_data->low_vco_l_max)
+			regval &= ~drv.hfpll_data->user_vco_mask;
+		else
+			regval |= drv.hfpll_data->user_vco_mask;
+		writel_relaxed(regval, base + drv.hfpll_data->user_offset);
+	}
 }
 
 /* Return the L2 speed that should be applied. */
@@ -235,40 +244,59 @@
 	sc->cur_speed = tgt_s;
 }
 
+struct vdd_data {
+	int vdd_mem;
+	int vdd_dig;
+	int vdd_core;
+	int ua_core;
+};
+
 /* Apply any per-cpu voltage increases. */
-static int increase_vdd(int cpu, int vdd_core, int vdd_mem, int vdd_dig,
+static int increase_vdd(int cpu, struct vdd_data *data,
 			enum setrate_reason reason)
 {
 	struct scalable *sc = &drv.scalable[cpu];
-	int rc = 0;
+	int rc;
 
 	/*
 	 * Increase vdd_mem active-set before vdd_dig.
 	 * vdd_mem should be >= vdd_dig.
 	 */
-	if (vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
+	if (data->vdd_mem > sc->vreg[VREG_MEM].cur_vdd) {
 		rc = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
-				vdd_mem, sc->vreg[VREG_MEM].max_vdd);
+				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
 		if (rc) {
 			dev_err(drv.dev,
 				"vdd_mem (cpu%d) increase failed (%d)\n",
 				cpu, rc);
 			return rc;
 		}
-		 sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
+		 sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
 	}
 
 	/* Increase vdd_dig active-set vote. */
-	if (vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
+	if (data->vdd_dig > sc->vreg[VREG_DIG].cur_vdd) {
 		rc = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
-				vdd_dig, sc->vreg[VREG_DIG].max_vdd);
+				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
 		if (rc) {
 			dev_err(drv.dev,
 				"vdd_dig (cpu%d) increase failed (%d)\n",
 				cpu, rc);
 			return rc;
 		}
-		sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
+		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
+	}
+
+	/* Increase current request. */
+	if (data->ua_core > sc->vreg[VREG_CORE].cur_ua) {
+		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
+						data->ua_core);
+		if (rc < 0) {
+			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
+				sc->vreg[VREG_CORE].name, rc);
+			return rc;
+		}
+		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
 	}
 
 	/*
@@ -277,25 +305,25 @@
 	 * because we don't know what CPU we are running on at this point, but
 	 * the CPU regulator API requires we call it from the affected CPU.
 	 */
-	if (vdd_core > sc->vreg[VREG_CORE].cur_vdd
+	if (data->vdd_core > sc->vreg[VREG_CORE].cur_vdd
 			&& reason != SETRATE_HOTPLUG) {
-		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
-					   sc->vreg[VREG_CORE].max_vdd);
+		rc = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
+				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
 		if (rc) {
 			dev_err(drv.dev,
 				"vdd_core (cpu%d) increase failed (%d)\n",
 				cpu, rc);
 			return rc;
 		}
-		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
+		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
 	}
 
-	return rc;
+	return 0;
 }
 
 /* Apply any per-cpu voltage decreases. */
-static void decrease_vdd(int cpu, int vdd_core, int vdd_mem, int vdd_dig,
-			enum setrate_reason reason)
+static void decrease_vdd(int cpu, struct vdd_data *data,
+			 enum setrate_reason reason)
 {
 	struct scalable *sc = &drv.scalable[cpu];
 	int ret;
@@ -305,46 +333,58 @@
 	 * that's being affected. Don't do this in the hotplug remove path,
 	 * where the rail is off and we're executing on the other CPU.
 	 */
-	if (vdd_core < sc->vreg[VREG_CORE].cur_vdd
+	if (data->vdd_core < sc->vreg[VREG_CORE].cur_vdd
 			&& reason != SETRATE_HOTPLUG) {
-		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
-					    sc->vreg[VREG_CORE].max_vdd);
+		ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg,
+				data->vdd_core, sc->vreg[VREG_CORE].max_vdd);
 		if (ret) {
 			dev_err(drv.dev,
 				"vdd_core (cpu%d) decrease failed (%d)\n",
 				cpu, ret);
 			return;
 		}
-		sc->vreg[VREG_CORE].cur_vdd = vdd_core;
+		sc->vreg[VREG_CORE].cur_vdd = data->vdd_core;
+	}
+
+	/* Decrease current request. */
+	if (data->ua_core < sc->vreg[VREG_CORE].cur_ua) {
+		ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
+						data->ua_core);
+		if (ret < 0) {
+			dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
+				sc->vreg[VREG_CORE].name, ret);
+			return;
+		}
+		sc->vreg[VREG_CORE].cur_ua = data->ua_core;
 	}
 
 	/* Decrease vdd_dig active-set vote. */
-	if (vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
+	if (data->vdd_dig < sc->vreg[VREG_DIG].cur_vdd) {
 		ret = rpm_regulator_set_voltage(sc->vreg[VREG_DIG].rpm_reg,
-				vdd_dig, sc->vreg[VREG_DIG].max_vdd);
+				data->vdd_dig, sc->vreg[VREG_DIG].max_vdd);
 		if (ret) {
 			dev_err(drv.dev,
 				"vdd_dig (cpu%d) decrease failed (%d)\n",
 				cpu, ret);
 			return;
 		}
-		sc->vreg[VREG_DIG].cur_vdd = vdd_dig;
+		sc->vreg[VREG_DIG].cur_vdd = data->vdd_dig;
 	}
 
 	/*
 	 * Decrease vdd_mem active-set after vdd_dig.
 	 * vdd_mem should be >= vdd_dig.
 	 */
-	if (vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
+	if (data->vdd_mem < sc->vreg[VREG_MEM].cur_vdd) {
 		ret = rpm_regulator_set_voltage(sc->vreg[VREG_MEM].rpm_reg,
-				vdd_mem, sc->vreg[VREG_MEM].max_vdd);
+				data->vdd_mem, sc->vreg[VREG_MEM].max_vdd);
 		if (ret) {
 			dev_err(drv.dev,
 				"vdd_mem (cpu%d) decrease failed (%d)\n",
 				cpu, ret);
 			return;
 		}
-		 sc->vreg[VREG_MEM].cur_vdd = vdd_mem;
+		sc->vreg[VREG_MEM].cur_vdd = data->vdd_mem;
 	}
 }
 
@@ -353,25 +393,39 @@
 	return drv.l2_freq_tbl[tgt->l2_level].vdd_mem;
 }
 
-static int calculate_vdd_dig(const struct acpu_level *tgt)
+static int get_src_dig(const struct core_speed *s)
 {
-	int pll_vdd_dig;
 	const int *hfpll_vdd = drv.hfpll_data->vdd;
 	const u32 low_vdd_l_max = drv.hfpll_data->low_vdd_l_max;
+	const u32 nom_vdd_l_max = drv.hfpll_data->nom_vdd_l_max;
 
-	if (drv.l2_freq_tbl[tgt->l2_level].speed.src != HFPLL)
-		pll_vdd_dig = hfpll_vdd[HFPLL_VDD_NONE];
-	else if (drv.l2_freq_tbl[tgt->l2_level].speed.pll_l_val > low_vdd_l_max)
-		pll_vdd_dig = hfpll_vdd[HFPLL_VDD_NOM];
+	if (s->src != HFPLL)
+		return hfpll_vdd[HFPLL_VDD_NONE];
+	else if (s->pll_l_val > nom_vdd_l_max)
+		return hfpll_vdd[HFPLL_VDD_HIGH];
+	else if (s->pll_l_val > low_vdd_l_max)
+		return hfpll_vdd[HFPLL_VDD_NOM];
 	else
-		pll_vdd_dig = hfpll_vdd[HFPLL_VDD_LOW];
-
-	return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig, pll_vdd_dig);
+		return hfpll_vdd[HFPLL_VDD_LOW];
 }
 
+static int calculate_vdd_dig(const struct acpu_level *tgt)
+{
+	int l2_pll_vdd_dig, cpu_pll_vdd_dig;
+
+	l2_pll_vdd_dig = get_src_dig(&drv.l2_freq_tbl[tgt->l2_level].speed);
+	cpu_pll_vdd_dig = get_src_dig(&tgt->speed);
+
+	return max(drv.l2_freq_tbl[tgt->l2_level].vdd_dig,
+		   max(l2_pll_vdd_dig, cpu_pll_vdd_dig));
+}
+
+static bool enable_boost = true;
+module_param_named(boost, enable_boost, bool, S_IRUGO | S_IWUSR);
+
 static int calculate_vdd_core(const struct acpu_level *tgt)
 {
-	return tgt->vdd_core;
+	return tgt->vdd_core + (enable_boost ? drv.boost_uv : 0);
 }
 
 /* Set the CPU's clock rate and adjust the L2 rate, voltage and BW requests. */
@@ -381,7 +435,7 @@
 	const struct core_speed *strt_acpu_s, *tgt_acpu_s;
 	const struct acpu_level *tgt;
 	int tgt_l2_l;
-	int vdd_mem, vdd_dig, vdd_core;
+	struct vdd_data vdd_data;
 	unsigned long flags;
 	int rc = 0;
 
@@ -410,19 +464,20 @@
 	}
 
 	/* Calculate voltage requirements for the current CPU. */
-	vdd_mem  = calculate_vdd_mem(tgt);
-	vdd_dig  = calculate_vdd_dig(tgt);
-	vdd_core = calculate_vdd_core(tgt);
+	vdd_data.vdd_mem  = calculate_vdd_mem(tgt);
+	vdd_data.vdd_dig  = calculate_vdd_dig(tgt);
+	vdd_data.vdd_core = calculate_vdd_core(tgt);
+	vdd_data.ua_core = tgt->ua_core;
 
 	/* Increase VDD levels if needed. */
 	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG) {
-		rc = increase_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
+		rc = increase_vdd(cpu, &vdd_data, reason);
 		if (rc)
 			goto out;
 	}
 
-	pr_debug("Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
-		 cpu, strt_acpu_s->khz, tgt_acpu_s->khz);
+	dev_dbg(drv.dev, "Switching from ACPU%d rate %lu KHz -> %lu KHz\n",
+		cpu, strt_acpu_s->khz, tgt_acpu_s->khz);
 
 	/* Set the new CPU speed. */
 	set_speed(&drv.scalable[cpu], tgt_acpu_s);
@@ -447,9 +502,9 @@
 	set_bus_bw(drv.l2_freq_tbl[tgt_l2_l].bw_level);
 
 	/* Drop VDD levels if we can. */
-	decrease_vdd(cpu, vdd_core, vdd_mem, vdd_dig, reason);
+	decrease_vdd(cpu, &vdd_data, reason);
 
-	pr_debug("ACPU%d speed change complete\n", cpu);
+	dev_dbg(drv.dev, "ACPU%d speed change complete\n", cpu);
 
 out:
 	if (reason == SETRATE_CPUFREQ || reason == SETRATE_HOTPLUG)
@@ -457,11 +512,16 @@
 	return rc;
 }
 
+static struct acpuclk_data acpuclk_krait_data = {
+	.set_rate = acpuclk_krait_set_rate,
+	.get_rate = acpuclk_krait_get_rate,
+};
+
 /* Initialize a HFPLL at a given rate and enable it. */
 static void __init hfpll_init(struct scalable *sc,
 			      const struct core_speed *tgt_s)
 {
-	pr_debug("Initializing HFPLL%d\n", sc - drv.scalable);
+	dev_dbg(drv.dev, "Initializing HFPLL%d\n", sc - drv.scalable);
 
 	/* Disable the PLL for re-programming. */
 	hfpll_disable(sc, true);
@@ -471,6 +531,9 @@
 		       sc->hfpll_base + drv.hfpll_data->config_offset);
 	writel_relaxed(0, sc->hfpll_base + drv.hfpll_data->m_offset);
 	writel_relaxed(1, sc->hfpll_base + drv.hfpll_data->n_offset);
+	if (drv.hfpll_data->has_user_reg)
+		writel_relaxed(drv.hfpll_data->user_val,
+			       sc->hfpll_base + drv.hfpll_data->user_offset);
 
 	/* Program droop controller, if supported */
 	if (drv.hfpll_data->has_droop_ctl)
@@ -533,19 +596,21 @@
 }
 
 /* Voltage regulator initialization. */
-static int __cpuinit regulator_init(struct scalable *sc)
+static int __cpuinit regulator_init(struct scalable *sc,
+				const struct acpu_level *acpu_level)
 {
 	int ret, vdd_mem, vdd_dig, vdd_core;
 
-	vdd_mem = calculate_vdd_mem(drv.max_acpu_lvl);
-	vdd_dig = calculate_vdd_dig(drv.max_acpu_lvl);
-
+	vdd_mem = calculate_vdd_mem(acpu_level);
 	ret = rpm_regulator_init(sc, VREG_MEM, vdd_mem, true);
 	if (ret)
 		goto err_mem;
+
+	vdd_dig = calculate_vdd_dig(acpu_level);
 	ret = rpm_regulator_init(sc, VREG_DIG, vdd_dig, true);
 	if (ret)
 		goto err_dig;
+
 	ret = rpm_regulator_init(sc, VREG_HFPLL_A,
 			   sc->vreg[VREG_HFPLL_A].max_vdd, false);
 	if (ret)
@@ -564,7 +629,15 @@
 			sc->vreg[VREG_CORE].name, ret);
 		goto err_core_get;
 	}
-	vdd_core = calculate_vdd_core(drv.max_acpu_lvl);
+	ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
+					 acpu_level->ua_core);
+	if (ret < 0) {
+		dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
+			sc->vreg[VREG_CORE].name, ret);
+		goto err_core_conf;
+	}
+	sc->vreg[VREG_CORE].cur_ua = acpu_level->ua_core;
+	vdd_core = calculate_vdd_core(acpu_level);
 	ret = regulator_set_voltage(sc->vreg[VREG_CORE].reg, vdd_core,
 				    sc->vreg[VREG_CORE].max_vdd);
 	if (ret) {
@@ -573,13 +646,6 @@
 		goto err_core_conf;
 	}
 	sc->vreg[VREG_CORE].cur_vdd = vdd_core;
-	ret = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
-					 sc->vreg[VREG_CORE].peak_ua);
-	if (ret < 0) {
-		dev_err(drv.dev, "regulator_set_optimum_mode(%s) failed (%d)\n",
-			sc->vreg[VREG_CORE].name, ret);
-		goto err_core_conf;
-	}
 	ret = regulator_enable(sc->vreg[VREG_CORE].reg);
 	if (ret) {
 		dev_err(drv.dev, "regulator_enable(%s) failed (%d)\n",
@@ -647,9 +713,63 @@
 	return 0;
 }
 
+static void __cpuinit fill_cur_core_speed(struct core_speed *s,
+					  struct scalable *sc)
+{
+	s->pri_src_sel = get_l2_indirect_reg(sc->l2cpmr_iaddr) & 0x3;
+	s->sec_src_sel = (get_l2_indirect_reg(sc->l2cpmr_iaddr) >> 2) & 0x3;
+	s->pll_l_val = readl_relaxed(sc->hfpll_base + drv.hfpll_data->l_offset);
+}
+
+static bool __cpuinit speed_equal(const struct core_speed *s1,
+				  const struct core_speed *s2)
+{
+	return (s1->pri_src_sel == s2->pri_src_sel &&
+		s1->sec_src_sel == s2->sec_src_sel &&
+		s1->pll_l_val == s2->pll_l_val);
+}
+
+static const struct acpu_level __cpuinit *find_cur_acpu_level(int cpu)
+{
+	struct scalable *sc = &drv.scalable[cpu];
+	const struct acpu_level *l;
+	struct core_speed cur_speed;
+
+	fill_cur_core_speed(&cur_speed, sc);
+	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
+		if (speed_equal(&l->speed, &cur_speed))
+			return l;
+	return NULL;
+}
+
+static const struct l2_level __init *find_cur_l2_level(void)
+{
+	struct scalable *sc = &drv.scalable[L2];
+	const struct l2_level *l;
+	struct core_speed cur_speed;
+
+	fill_cur_core_speed(&cur_speed, sc);
+	for (l = drv.l2_freq_tbl; l->speed.khz != 0; l++)
+		if (speed_equal(&l->speed, &cur_speed))
+			return l;
+	return NULL;
+}
+
+static const struct acpu_level __cpuinit *find_min_acpu_level(void)
+{
+	struct acpu_level *l;
+
+	for (l = drv.acpu_freq_tbl; l->speed.khz != 0; l++)
+		if (l->use_for_scaling)
+			return l;
+
+	return NULL;
+}
+
 static int __cpuinit per_cpu_init(int cpu)
 {
 	struct scalable *sc = &drv.scalable[cpu];
+	const struct acpu_level *acpu_level;
 	int ret;
 
 	sc->hfpll_base = ioremap(sc->hfpll_phys_base, SZ_32);
@@ -658,14 +778,29 @@
 		goto err_ioremap;
 	}
 
-	ret = regulator_init(sc);
+	acpu_level = find_cur_acpu_level(cpu);
+	if (!acpu_level) {
+		acpu_level = find_min_acpu_level();
+		if (!acpu_level) {
+			ret = -ENODEV;
+			goto err_table;
+		}
+		dev_dbg(drv.dev, "CPU%d is running at an unknown rate. Defaulting to %lu KHz.\n",
+			cpu, acpu_level->speed.khz);
+	} else {
+		dev_dbg(drv.dev, "CPU%d is running at %lu KHz\n", cpu,
+			acpu_level->speed.khz);
+	}
+
+	ret = regulator_init(sc, acpu_level);
 	if (ret)
 		goto err_regulators;
 
-	ret = init_clock_sources(sc, &drv.max_acpu_lvl->speed);
+	ret = init_clock_sources(sc, &acpu_level->speed);
 	if (ret)
 		goto err_clocks;
-	sc->l2_vote = drv.max_acpu_lvl->l2_level;
+
+	sc->l2_vote = acpu_level->l2_level;
 	sc->initialized = true;
 
 	return 0;
@@ -673,13 +808,14 @@
 err_clocks:
 	regulator_cleanup(sc);
 err_regulators:
+err_table:
 	iounmap(sc->hfpll_base);
 err_ioremap:
 	return ret;
 }
 
 /* Register with bus driver. */
-static void __init bus_init(void)
+static void __init bus_init(const struct l2_level *l2_level)
 {
 	int ret;
 
@@ -690,7 +826,7 @@
 	}
 
 	ret = msm_bus_scale_client_update_request(drv.bus_perf_client,
-			drv.l2_freq_tbl[drv.max_acpu_lvl->l2_level].bw_level);
+			l2_level->bw_level);
 	if (ret)
 		dev_err(drv.dev, "initial bandwidth req failed (%d)\n", ret);
 }
@@ -731,20 +867,20 @@
 static void __init cpufreq_table_init(void) {}
 #endif
 
-#define HOT_UNPLUG_KHZ STBY_KHZ
 static int __cpuinit acpuclk_cpu_callback(struct notifier_block *nfb,
 					    unsigned long action, void *hcpu)
 {
 	static int prev_khz[NR_CPUS];
 	int rc, cpu = (int)hcpu;
 	struct scalable *sc = &drv.scalable[cpu];
+	unsigned long hot_unplug_khz = acpuclk_krait_data.power_collapse_khz;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DEAD:
 		prev_khz[cpu] = acpuclk_krait_get_rate(cpu);
 		/* Fall through. */
 	case CPU_UP_CANCELED:
-		acpuclk_krait_set_rate(cpu, HOT_UNPLUG_KHZ, SETRATE_HOTPLUG);
+		acpuclk_krait_set_rate(cpu, hot_unplug_khz, SETRATE_HOTPLUG);
 		regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg, 0);
 		break;
 	case CPU_UP_PREPARE:
@@ -757,7 +893,7 @@
 		if (WARN_ON(!prev_khz[cpu]))
 			return NOTIFY_BAD;
 		rc = regulator_set_optimum_mode(sc->vreg[VREG_CORE].reg,
-						sc->vreg[VREG_CORE].peak_ua);
+						sc->vreg[VREG_CORE].cur_ua);
 		if (rc < 0)
 			return NOTIFY_BAD;
 		acpuclk_krait_set_rate(cpu, prev_khz[cpu], SETRATE_HOTPLUG);
@@ -840,27 +976,6 @@
 	return tbl_idx;
 }
 
-static const struct acpu_level __init *find_max_acpu_lvl(struct acpu_level *tbl)
-{
-	struct acpu_level *l, *max_lvl = NULL;
-
-	for (l = tbl; l->speed.khz != 0; l++)
-		if (l->use_for_scaling)
-			max_lvl = l;
-
-	BUG_ON(!max_lvl);
-	dev_info(drv.dev, "Max CPU freq: %lu KHz\n", max_lvl->speed.khz);
-
-	return max_lvl;
-}
-
-static struct acpuclk_data acpuclk_krait_data = {
-	.set_rate = acpuclk_krait_set_rate,
-	.get_rate = acpuclk_krait_get_rate,
-	.power_collapse_khz = STBY_KHZ,
-	.wait_for_irq_khz = STBY_KHZ,
-};
-
 static void __init drv_data_init(struct device *dev,
 				 const struct acpuclk_krait_params *params)
 {
@@ -892,20 +1007,21 @@
 				    params->pvs_tables[tbl_idx].size,
 				    GFP_KERNEL);
 	BUG_ON(!drv.acpu_freq_tbl);
+	drv.boost_uv = params->pvs_tables[tbl_idx].boost_uv;
 
-	drv.max_acpu_lvl = find_max_acpu_lvl(drv.acpu_freq_tbl);
+	acpuclk_krait_data.power_collapse_khz = params->stby_khz;
+	acpuclk_krait_data.wait_for_irq_khz = params->stby_khz;
 }
 
 static void __init hw_init(void)
 {
 	struct scalable *l2 = &drv.scalable[L2];
+	const struct l2_level *l2_level;
 	int cpu, rc;
 
 	if (krait_needs_vmin())
 		krait_apply_vmin(drv.acpu_freq_tbl);
 
-	bus_init();
-
 	l2->hfpll_base = ioremap(l2->hfpll_phys_base, SZ_32);
 	BUG_ON(!l2->hfpll_base);
 
@@ -915,14 +1031,26 @@
 	rc = rpm_regulator_init(l2, VREG_HFPLL_B,
 				l2->vreg[VREG_HFPLL_B].max_vdd, false);
 	BUG_ON(rc);
-	rc = init_clock_sources(l2,
-			&drv.l2_freq_tbl[drv.max_acpu_lvl->l2_level].speed);
+
+	l2_level = find_cur_l2_level();
+	if (!l2_level) {
+		l2_level = drv.l2_freq_tbl;
+		dev_dbg(drv.dev, "L2 is running at an unknown rate. Defaulting to %lu KHz.\n",
+			l2_level->speed.khz);
+	} else {
+		dev_dbg(drv.dev, "L2 is running at %lu KHz\n",
+			l2_level->speed.khz);
+	}
+
+	rc = init_clock_sources(l2, &l2_level->speed);
 	BUG_ON(rc);
 
 	for_each_online_cpu(cpu) {
 		rc = per_cpu_init(cpu);
 		BUG_ON(rc);
 	}
+
+	bus_init(l2_level);
 }
 
 int __init acpuclk_krait_init(struct device *dev,
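
Reviewer note (not part of the patch): the user-register handling added to hfpll_set_rate() above boils down to a read-modify-write that clears or sets the VCO-select bit depending on whether the target L value fits the low-frequency VCO. Below is a minimal user-space sketch of that decision; the field names mirror struct hfpll_data and low_vco_l_max = 65 echoes the 8974 data, but the mask bit and the starting register value are made-up examples, since the patch leaves user_vco_mask to be filled in per target.

	#include <stdio.h>

	/* Reviewer sketch only -- NOT driver code. */
	struct hfpll_cfg {
		unsigned int user_vco_mask;	/* bit selecting the high-frequency VCO (example value) */
		unsigned int low_vco_l_max;	/* largest L value the low VCO supports */
	};

	/* Same decision hfpll_set_rate() now makes before writing the user register. */
	static unsigned int select_vco(unsigned int user_reg, unsigned int pll_l_val,
				       const struct hfpll_cfg *cfg)
	{
		if (pll_l_val <= cfg->low_vco_l_max)
			user_reg &= ~cfg->user_vco_mask;	/* stay on the low VCO */
		else
			user_reg |= cfg->user_vco_mask;		/* switch to the high VCO */
		return user_reg;
	}

	int main(void)
	{
		const struct hfpll_cfg cfg = {
			.user_vco_mask = 1u << 20,	/* illustrative bit, not from the patch */
			.low_vco_l_max = 65,		/* value used by acpuclock-8974.c */
		};

		printf("L=52 -> user reg 0x%08x\n", select_vco(0x8, 52, &cfg));
		printf("L=80 -> user reg 0x%08x\n", select_vco(0x8, 80, &cfg));
		return 0;
	}

Running it shows L = 52 leaving the mask clear and L = 80 setting it, matching the branch added in the driver.
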
diff --git a/arch/arm/mach-msm/acpuclock-krait.h b/arch/arm/mach-msm/acpuclock-krait.h
index f92aaf3..5a95e76 100644
--- a/arch/arm/mach-msm/acpuclock-krait.h
+++ b/arch/arm/mach-msm/acpuclock-krait.h
@@ -14,7 +14,6 @@
 #ifndef __ARCH_ARM_MACH_MSM_ACPUCLOCK_KRAIT_H
 #define __ARCH_ARM_MACH_MSM_ACPUCLOCK_KRAIT_H
 
-#define STBY_KHZ		1
 #define L2(x) (x)
 #define BW_MBPS(_bw) \
 	{ \
@@ -39,7 +38,6 @@
 enum src_id {
 	PLL_0 = 0,
 	HFPLL,
-	QSB,
 	PLL_8,
 };
 
@@ -74,6 +72,7 @@
 	HFPLL_VDD_NONE,
 	HFPLL_VDD_LOW,
 	HFPLL_VDD_NOM,
+	HFPLL_VDD_HIGH,
 	NUM_HFPLL_VDD
 };
 
@@ -96,15 +95,15 @@
  * @reg: Regulator handle.
  * @rpm_reg: RPM Regulator handle.
  * @cur_vdd: Last-set voltage in uV.
- * @peak_ua: Maximum current draw expected in uA.
+ * @cur_ua: Last-set current in uA.
  */
 struct vreg {
 	const char *name;
 	const int max_vdd;
-	const int peak_ua;
 	struct regulator *reg;
 	struct rpm_regulator *rpm_reg;
 	int cur_vdd;
+	int cur_ua;
 };
 
 /**
@@ -116,11 +115,11 @@
  * @pll_l_val: HFPLL "L" value to be applied when an HFPLL source is selected.
  */
 struct core_speed {
-	const unsigned long khz;
-	const int src;
-	const u32 pri_src_sel;
-	const u32 sec_src_sel;
-	const u32 pll_l_val;
+	unsigned long khz;
+	int src;
+	u32 pri_src_sel;
+	u32 sec_src_sel;
+	u32 pll_l_val;
 };
 
 /**
@@ -143,12 +142,14 @@
  * @speed: CPU clock configuration.
  * @l2_level: L2 configuration to use.
  * @vdd_core: CPU core voltage in uV.
+ * @ua_core: CPU core current consumption in uA.
  */
 struct acpu_level {
 	const int use_for_scaling;
 	const struct core_speed speed;
 	const unsigned int l2_level;
 	int vdd_core;
+	int ua_core;
 };
 
 /**
@@ -159,10 +160,16 @@
  * @n_offset: "N" value register offset from base address.
  * @config_offset: Configuration register offset from base address.
  * @config_val: Value to initialize the @config_offset register to.
+ * @has_user_reg: Indicates the presence of an additional config register.
+ * @user_offset: User register offset from base address, if applicable.
+ * @user_val: Value to initialize the @user_offset register to.
+ * @user_vco_mask: Bit in the @user_offset to enable high-frequency VCO mode.
  * @has_droop_ctl: Indicates the presence of a voltage droop controller.
  * @droop_offset: Droop controller register offset from base address.
  * @droop_val: Value to initialize the @config_offset register to.
  * @low_vdd_l_max: Maximum "L" value supported at HFPLL_VDD_LOW.
+ * @nom_vdd_l_max: Maximum "L" value supported at HFPLL_VDD_NOM.
+ * @low_vco_l_max: Maximum "L" value supported in low-frequency VCO mode.
  * @vdd: voltage requirements for each VDD level for the L2 PLL.
  */
 struct hfpll_data {
@@ -172,10 +179,16 @@
 	const u32 n_offset;
 	const u32 config_offset;
 	const u32 config_val;
+	const bool has_user_reg;
+	const u32 user_offset;
+	const u32 user_val;
+	const u32 user_vco_mask;
 	const bool has_droop_ctl;
 	const u32 droop_offset;
 	const u32 droop_val;
 	const u32 low_vdd_l_max;
+	const u32 nom_vdd_l_max;
+	const u32 low_vco_l_max;
 	const int vdd[NUM_HFPLL_VDD];
 };
 
@@ -207,10 +220,12 @@
  * struct pvs_table - CPU performance level table and size.
  * @table: CPU performance level table
  * @size: sizeof(@table)
+ * @boost_uv: Voltage boost amount in uV.
  */
 struct pvs_table {
 	struct acpu_level *table;
 	size_t size;
+	int boost_uv;
 };
 
 /**
@@ -223,6 +238,7 @@
  * @l2_freq_tbl_size: Size of @l2_freq_tbl.
  * @qfprom_phys_base: Physical base address of QFPROM.
  * @bus_scale: MSM bus driver parameters.
+ * @stby_khz: KHz value corresponding to an always-on clock source.
  */
 struct acpuclk_krait_params {
 	struct scalable *scalable;
@@ -233,6 +249,7 @@
 	size_t l2_freq_tbl_size;
 	phys_addr_t qfprom_phys_base;
 	struct msm_bus_scale_pdata *bus_scale;
+	unsigned long stby_khz;
 };
 
 /**
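
Reviewer note (not part of the patch): the boost_uv field added to struct pvs_table and the ua_core field added to struct acpu_level are consumed by calculate_vdd_core() and the regulator_set_optimum_mode() votes in acpuclock-krait.c. A minimal sketch of the voltage side of that calculation, with stand-in types and values that echo the 8960 tables earlier in this patch:

	#include <stdbool.h>
	#include <stdio.h>

	/* Reviewer sketch only -- NOT driver code. */
	struct level {
		int vdd_core;	/* uV taken from the PVS table */
	};

	/* Mirrors calculate_vdd_core(): add the per-bin boost only when enabled. */
	static int boosted_vdd_core(const struct level *l, int boost_uv, bool boost)
	{
		return l->vdd_core + (boost ? boost_uv : 0);
	}

	int main(void)
	{
		/* 1150000 uV entry and 25000 uV boost echo the 8960 tables above. */
		const struct level top = { .vdd_core = 1150000 };

		printf("boost off: %d uV\n", boosted_vdd_core(&top, 25000, false));
		printf("boost on:  %d uV\n", boosted_vdd_core(&top, 25000, true));
		return 0;
	}

The boost added by module_param_named(boost, ...) should be switchable at runtime through sysfs (typically something like /sys/module/acpuclock_krait/parameters/boost for a built-in driver, though the exact path depends on the object name in the build).
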
diff --git a/arch/arm/mach-msm/acpuclock.h b/arch/arm/mach-msm/acpuclock.h
index e73a2af..841f717 100644
--- a/arch/arm/mach-msm/acpuclock.h
+++ b/arch/arm/mach-msm/acpuclock.h
@@ -90,4 +90,4 @@
  */
 void acpuclk_register(struct acpuclk_data *data);
 
-#endif
+#endif /*__ARCH_ARM_MACH_MSM_ACPUCLOCK_H*/
diff --git a/arch/arm/mach-msm/bam_dmux.c b/arch/arm/mach-msm/bam_dmux.c
index 5aea0ed..b35e949 100644
--- a/arch/arm/mach-msm/bam_dmux.c
+++ b/arch/arm/mach-msm/bam_dmux.c
@@ -283,6 +283,9 @@
 static int bam_dmux_uplink_vote;
 static int bam_dmux_power_state;
 
+static void bam_dmux_log(const char *fmt, ...)
+					__printf(1, 2);
+
 
 #define DMUX_LOG_KERR(fmt...) \
 do { \
@@ -569,7 +572,8 @@
 				rx_hdr->ch_id);
 		handle_bam_mux_cmd_open(rx_hdr);
 		if (!(rx_hdr->reserved & ENABLE_DISCONNECT_ACK)) {
-			bam_dmux_log("%s: deactivating disconnect ack\n");
+			bam_dmux_log("%s: deactivating disconnect ack\n",
+								__func__);
 			disconnect_ack = 0;
 		}
 		dev_kfree_skb_any(rx_skb);
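
Reviewer note (not part of the patch): the __printf(1, 2) forward declaration added above is what lets the compiler flag format/argument mismatches such as the missing __func__ fixed in this hunk. A small self-contained illustration of the same attribute outside the kernel (the logger below is a stand-in, not bam_dmux code):

	#include <stdarg.h>
	#include <stdio.h>

	/*
	 * Same idea as the kernel's __printf(1, 2): argument 1 is the format
	 * string and the checked arguments start at 2, so GCC/Clang warn on
	 * mismatched or missing arguments.
	 */
	__attribute__((format(printf, 1, 2)))
	static void demo_log(const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		vfprintf(stderr, fmt, args);
		va_end(args);
	}

	int main(void)
	{
		demo_log("%s: deactivating disconnect ack\n", __func__);
		/* demo_log("%s: deactivating disconnect ack\n");  <- would now trigger -Wformat */
		return 0;
	}
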
diff --git a/arch/arm/mach-msm/board-8064-gpu.c b/arch/arm/mach-msm/board-8064-gpu.c
index eb36a81e..122505e 100644
--- a/arch/arm/mach-msm/board-8064-gpu.c
+++ b/arch/arm/mach-msm/board-8064-gpu.c
@@ -225,6 +225,7 @@
 	.set_grp_async = NULL,
 	.idle_timeout = HZ/10,
 	.nap_allowed = true,
+	.strtstp_sleepwake = true,
 	.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
 #ifdef CONFIG_MSM_BUS_SCALING
 	.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8064-pmic.c b/arch/arm/mach-msm/board-8064-pmic.c
index 43a79b5..678eb9e 100644
--- a/arch/arm/mach-msm/board-8064-pmic.c
+++ b/arch/arm/mach-msm/board-8064-pmic.c
@@ -439,10 +439,7 @@
 	apq8064_pm8921_platform_data.num_regulators =
 					msm8064_pm8921_regulator_pdata_len;
 
-	if (machine_is_apq8064_rumi3()) {
-		apq8064_pm8921_irq_pdata.devirq = 0;
-		apq8064_pm8821_irq_pdata.devirq = 0;
-	} else if (machine_is_apq8064_mtp()) {
+	if (machine_is_apq8064_mtp()) {
 		apq8064_pm8921_bms_pdata.battery_type = BATT_PALLADIUM;
 	} else if (machine_is_apq8064_liquid()) {
 		apq8064_pm8921_bms_pdata.battery_type = BATT_DESAY;
diff --git a/arch/arm/mach-msm/board-8064-storage.c b/arch/arm/mach-msm/board-8064-storage.c
index 13d8b3b..c81a637 100644
--- a/arch/arm/mach-msm/board-8064-storage.c
+++ b/arch/arm/mach-msm/board-8064-storage.c
@@ -331,21 +331,6 @@
 
 void __init apq8064_init_mmc(void)
 {
-	if ((machine_is_apq8064_rumi3()) || machine_is_apq8064_sim()) {
-		if (apq8064_sdc1_pdata) {
-			if (machine_is_apq8064_sim())
-				apq8064_sdc1_pdata->disable_bam = true;
-			apq8064_sdc1_pdata->disable_runtime_pm = true;
-			apq8064_sdc1_pdata->disable_cmd23 = true;
-		}
-		if (apq8064_sdc3_pdata) {
-			if (machine_is_apq8064_sim())
-				apq8064_sdc3_pdata->disable_bam = true;
-			apq8064_sdc3_pdata->disable_runtime_pm = true;
-			apq8064_sdc3_pdata->disable_cmd23 = true;
-		}
-	}
-
 	if (apq8064_sdc1_pdata)
 		apq8064_add_sdcc(1, apq8064_sdc1_pdata);
 
diff --git a/arch/arm/mach-msm/board-8064.c b/arch/arm/mach-msm/board-8064.c
index 3e07833..6317685 100644
--- a/arch/arm/mach-msm/board-8064.c
+++ b/arch/arm/mach-msm/board-8064.c
@@ -1751,10 +1751,10 @@
 
 static struct msm_thermal_data msm_thermal_pdata = {
 	.sensor_id = 7,
-	.poll_ms = 1000,
-	.limit_temp = 60,
-	.temp_hysteresis = 10,
-	.limit_freq = 918000,
+	.poll_ms = 250,
+	.limit_temp_degC = 60,
+	.temp_hysteresis_degC = 10,
+	.freq_step = 2,
 };
 
 #define MSM_SHARED_RAM_PHYS 0x80000000
@@ -2190,6 +2190,9 @@
 	&qseecom_device,
 #endif
 
+	&msm_8064_device_tsif[0],
+	&msm_8064_device_tsif[1],
+
 #if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
 		defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE)
 	&qcrypto_device,
@@ -2253,7 +2256,6 @@
 	&msm_gss,
 	&apq8064_rtb_device,
 	&apq8064_cpu_idle_device,
-	&apq8064_msm_gov_device,
 	&apq8064_device_cache_erp,
 	&msm8960_device_ebi1_ch0_erp,
 	&msm8960_device_ebi1_ch1_erp,
@@ -2273,19 +2275,6 @@
 	&apq8064_cache_dump_device,
 };
 
-static struct platform_device *sim_devices[] __initdata = {
-	&apq8064_device_uart_gsbi3,
-	&msm_device_sps_apq8064,
-};
-
-static struct platform_device *rumi3_devices[] __initdata = {
-	&apq8064_device_uart_gsbi1,
-	&msm_device_sps_apq8064,
-#ifdef CONFIG_MSM_ROTATOR
-	&msm_rotator_device,
-#endif
-};
-
 static struct platform_device *cdp_devices[] __initdata = {
 	&apq8064_device_uart_gsbi1,
 	&apq8064_device_uart_gsbi7,
@@ -2847,10 +2836,6 @@
 		mach_mask = I2C_FFA;
 	else if (machine_is_apq8064_liquid())
 		mach_mask = I2C_LIQUID;
-	else if (machine_is_apq8064_rumi3())
-		mach_mask = I2C_RUMI;
-	else if (machine_is_apq8064_sim())
-		mach_mask = I2C_SIM;
 	else if (PLATFORM_IS_MPQ8064())
 		mach_mask = I2C_MPQ_CDP;
 	else
@@ -2967,25 +2952,6 @@
 	apq8064_allocate_fb_region();
 }
 
-static void __init apq8064_sim_init(void)
-{
-	struct msm_watchdog_pdata *wdog_pdata = (struct msm_watchdog_pdata *)
-		&msm8064_device_watchdog.dev.platform_data;
-
-	wdog_pdata->bark_time = 15000;
-	apq8064_common_init();
-	platform_add_devices(sim_devices, ARRAY_SIZE(sim_devices));
-}
-
-static void __init apq8064_rumi3_init(void)
-{
-	apq8064_common_init();
-	ethernet_init();
-	msm_rotator_set_split_iommu_domain();
-	platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
-	spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
-}
-
 static void __init apq8064_cdp_init(void)
 {
 	if (meminfo_init(SYS_MEMORY, SZ_256M) < 0)
@@ -3025,27 +2991,6 @@
 	}
 }
 
-MACHINE_START(APQ8064_SIM, "QCT APQ8064 SIMULATOR")
-	.map_io = apq8064_map_io,
-	.reserve = apq8064_reserve,
-	.init_irq = apq8064_init_irq,
-	.handle_irq = gic_handle_irq,
-	.timer = &msm_timer,
-	.init_machine = apq8064_sim_init,
-	.restart = msm_restart,
-MACHINE_END
-
-MACHINE_START(APQ8064_RUMI3, "QCT APQ8064 RUMI3")
-	.map_io = apq8064_map_io,
-	.reserve = apq8064_reserve,
-	.init_irq = apq8064_init_irq,
-	.handle_irq = gic_handle_irq,
-	.timer = &msm_timer,
-	.init_machine = apq8064_rumi3_init,
-	.init_early = apq8064_allocate_memory_regions,
-	.restart = msm_restart,
-MACHINE_END
-
 MACHINE_START(APQ8064_CDP, "QCT APQ8064 CDP")
 	.map_io = apq8064_map_io,
 	.reserve = apq8064_reserve,
diff --git a/arch/arm/mach-msm/board-8930-display.c b/arch/arm/mach-msm/board-8930-display.c
index 292c0316..d975997 100644
--- a/arch/arm/mach-msm/board-8930-display.c
+++ b/arch/arm/mach-msm/board-8930-display.c
@@ -482,16 +482,16 @@
 static struct mipi_dsi_phy_ctrl dsi_novatek_cmd_mode_phy_db = {
 
 /* DSI_BIT_CLK at 500MHz, 2 lane, RGB888 */
-	{0x0F, 0x0a, 0x04, 0x00, 0x20},	/* regulator */
+	{0x09, 0x08, 0x05, 0x00, 0x20},	/* regulator */
 	/* timing   */
 	{0xab, 0x8a, 0x18, 0x00, 0x92, 0x97, 0x1b, 0x8c,
 	0x0c, 0x03, 0x04, 0xa0},
 	{0x5f, 0x00, 0x00, 0x10},	/* phy ctrl */
 	{0xff, 0x00, 0x06, 0x00},	/* strength */
 	/* pll control */
-	{0x40, 0xf9, 0x30, 0xda, 0x00, 0x40, 0x03, 0x62,
+	{0x0, 0xe, 0x30, 0xda, 0x00, 0x10, 0x0f, 0x61,
 	0x40, 0x07, 0x03,
-	0x00, 0x1a, 0x00, 0x00, 0x02, 0x00, 0x20, 0x00, 0x01},
+	0x00, 0x1a, 0x00, 0x00, 0x02, 0x00, 0x20, 0x00, 0x02},
 };
 
 static struct mipi_dsi_panel_platform_data novatek_pdata = {
diff --git a/arch/arm/mach-msm/board-8930-gpu.c b/arch/arm/mach-msm/board-8930-gpu.c
index bd343c1..99a5a34 100644
--- a/arch/arm/mach-msm/board-8930-gpu.c
+++ b/arch/arm/mach-msm/board-8930-gpu.c
@@ -140,6 +140,7 @@
 	.set_grp_async = NULL,
 	.idle_timeout = HZ/12,
 	.nap_allowed = true,
+	.strtstp_sleepwake = true,
 	.clk_map = KGSL_CLK_CORE | KGSL_CLK_IFACE | KGSL_CLK_MEM_IFACE,
 #ifdef CONFIG_MSM_BUS_SCALING
 	.bus_scale_table = &grp3d_bus_scale_pdata,
diff --git a/arch/arm/mach-msm/board-8930-regulator.c b/arch/arm/mach-msm/board-8930-regulator.c
index af91089..d3a4960 100644
--- a/arch/arm/mach-msm/board-8930-regulator.c
+++ b/arch/arm/mach-msm/board-8930-regulator.c
@@ -174,11 +174,13 @@
 	REGULATOR_SUPPLY("8038_s5",		NULL),
 	REGULATOR_SUPPLY("krait0",		"acpuclk-8627"),
 	REGULATOR_SUPPLY("krait0",		"acpuclk-8930"),
+	REGULATOR_SUPPLY("krait0",		"acpuclk-8930aa"),
 };
 VREG_CONSUMERS(S6) = {
 	REGULATOR_SUPPLY("8038_s6",		NULL),
 	REGULATOR_SUPPLY("krait1",		"acpuclk-8627"),
 	REGULATOR_SUPPLY("krait1",		"acpuclk-8930"),
+	REGULATOR_SUPPLY("krait1",		"acpuclk-8930aa"),
 };
 VREG_CONSUMERS(LVS1) = {
 	REGULATOR_SUPPLY("8038_lvs1",		NULL),
@@ -538,6 +540,14 @@
 	RPM_REG_MAP(L24,            0, 2, "krait1_mem",   "acpuclk-8627"),
 	RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig",   "acpuclk-8627"),
 	RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig",   "acpuclk-8627"),
+
+	RPM_REG_MAP(L23,            0, 1, "krait0_hfpll", "acpuclk-8930aa"),
+	RPM_REG_MAP(L23,            0, 2, "krait1_hfpll", "acpuclk-8930aa"),
+	RPM_REG_MAP(L23,            0, 6, "l2_hfpll",     "acpuclk-8930aa"),
+	RPM_REG_MAP(L24,            0, 1, "krait0_mem",   "acpuclk-8930aa"),
+	RPM_REG_MAP(L24,            0, 2, "krait1_mem",   "acpuclk-8930aa"),
+	RPM_REG_MAP(VDD_DIG_CORNER, 0, 1, "krait0_dig",   "acpuclk-8930aa"),
+	RPM_REG_MAP(VDD_DIG_CORNER, 0, 2, "krait1_dig",   "acpuclk-8930aa"),
 };
 
 struct rpm_regulator_platform_data msm8930_rpm_regulator_pdata __devinitdata = {
diff --git a/arch/arm/mach-msm/board-8930.c b/arch/arm/mach-msm/board-8930.c
index d7a077c..fb20307 100644
--- a/arch/arm/mach-msm/board-8930.c
+++ b/arch/arm/mach-msm/board-8930.c
@@ -47,6 +47,10 @@
 #include <linux/mfd/wcd9xxx/core.h>
 #include <linux/mfd/wcd9xxx/pdata.h>
 
+#ifdef CONFIG_STM_LIS3DH
+#include <linux/input/lis3dh.h>
+#endif
+
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/setup.h>
@@ -2095,10 +2099,10 @@
 
 static struct msm_thermal_data msm_thermal_pdata = {
 	.sensor_id = 9,
-	.poll_ms = 1000,
-	.limit_temp = 60,
-	.temp_hysteresis = 10,
-	.limit_freq = 918000,
+	.poll_ms = 250,
+	.limit_temp_degC = 60,
+	.temp_hysteresis_degC = 10,
+	.freq_step = 2,
 };
 
 #ifdef CONFIG_MSM_FAKE_BATTERY
@@ -2226,6 +2230,7 @@
 #endif
 	&msm8930_rpm_device,
 	&msm8930_rpm_log_device,
+	&msm8930_rpm_rbcpr_device,
 	&msm8930_rpm_stat_device,
 #ifdef CONFIG_ION_MSM
 	&msm8930_ion_dev,
@@ -2451,6 +2456,31 @@
 };
 #endif /* CONFIG_ISL9519_CHARGER */
 
+#ifdef CONFIG_STM_LIS3DH
+static struct lis3dh_acc_platform_data lis3dh_accel = {
+	.poll_interval = 200,
+	.min_interval = 10,
+	.g_range = LIS3DH_ACC_G_2G,
+	.axis_map_x = 1,
+	.axis_map_y = 0,
+	.axis_map_z = 2,
+	.negate_x = 0,
+	.negate_y = 0,
+	.negate_z = 1,
+	.init = NULL,
+	.exit = NULL,
+	.gpio_int1 = -EINVAL,
+	.gpio_int2 = -EINVAL,
+};
+
+static struct i2c_board_info __initdata lis3dh_i2c_boardinfo[] = {
+	{
+		I2C_BOARD_INFO(LIS3DH_ACC_DEV_NAME, 0x18),
+		.platform_data = &lis3dh_accel,
+	},
+};
+#endif /* CONFIG_STM_LIS3DH */
+
 static struct i2c_registry msm8960_i2c_devices[] __initdata = {
 #ifdef CONFIG_ISL9519_CHARGER
 	{
@@ -2486,6 +2516,14 @@
 		sii_device_info,
 		ARRAY_SIZE(sii_device_info),
 	},
+#ifdef CONFIG_STM_LIS3DH
+	{
+		I2C_FFA | I2C_FLUID,
+		MSM_8930_GSBI12_QUP_I2C_BUS_ID,
+		lis3dh_i2c_boardinfo,
+		ARRAY_SIZE(lis3dh_i2c_boardinfo),
+	},
+#endif
 };
 #endif /* CONFIG_I2C */
 
@@ -2571,8 +2609,10 @@
 	platform_add_devices(msm8930_footswitch, msm8930_num_footswitch);
 	if (cpu_is_msm8627())
 		platform_device_register(&msm8627_device_acpuclk);
-	else if (cpu_is_msm8930() || cpu_is_msm8930aa())
+	else if (cpu_is_msm8930())
 		platform_device_register(&msm8930_device_acpuclk);
+	else if (cpu_is_msm8930aa())
+		platform_device_register(&msm8930aa_device_acpuclk);
 	platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
 	msm8930_add_vidc_device();
 	/*
diff --git a/arch/arm/mach-msm/board-8960-display.c b/arch/arm/mach-msm/board-8960-display.c
index 88827ab..ddeba32 100644
--- a/arch/arm/mach-msm/board-8960-display.c
+++ b/arch/arm/mach-msm/board-8960-display.c
@@ -615,16 +615,6 @@
 	return mdp_pdata.cont_splash_enabled;
 }
 
-static struct platform_device mipi_dsi_renesas_panel_device = {
-	.name = "mipi_renesas",
-	.id = 0,
-};
-
-static struct platform_device mipi_dsi_simulator_panel_device = {
-	.name = "mipi_simulator",
-	.id = 0,
-};
-
 #define LPM_CHANNEL0 0
 static int toshiba_gpio[] = {LPM_CHANNEL0};
 
@@ -1015,31 +1005,19 @@
 	platform_device_register(&wfd_device);
 #endif
 
-	if (machine_is_msm8960_sim())
-		platform_device_register(&mipi_dsi_simulator_panel_device);
-
-	if (machine_is_msm8960_rumi3())
-		platform_device_register(&mipi_dsi_renesas_panel_device);
-
-	if (!machine_is_msm8960_sim() && !machine_is_msm8960_rumi3()) {
-		platform_device_register(&mipi_dsi_novatek_panel_device);
-		platform_device_register(&mipi_dsi_orise_panel_device);
+	platform_device_register(&mipi_dsi_novatek_panel_device);
+	platform_device_register(&mipi_dsi_orise_panel_device);
 
 #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
-		platform_device_register(&hdmi_msm_device);
+	platform_device_register(&hdmi_msm_device);
 #endif
-	}
 
 	if (machine_is_msm8960_liquid())
 		platform_device_register(&mipi_dsi2lvds_bridge_device);
 	else
 		platform_device_register(&mipi_dsi_toshiba_panel_device);
 
-	if (machine_is_msm8x60_rumi3()) {
-		msm_fb_register_device("mdp", NULL);
-		mipi_dsi_pdata.target_type = 1;
-	} else
-		msm_fb_register_device("mdp", &mdp_pdata);
+	msm_fb_register_device("mdp", &mdp_pdata);
 	msm_fb_register_device("mipi_dsi", &mipi_dsi_pdata);
 #ifdef CONFIG_MSM_BUS_SCALING
 	msm_fb_register_device("dtv", &dtv_pdata);
diff --git a/arch/arm/mach-msm/board-8960-pmic.c b/arch/arm/mach-msm/board-8960-pmic.c
index 17b0b6f..5950026 100644
--- a/arch/arm/mach-msm/board-8960-pmic.c
+++ b/arch/arm/mach-msm/board-8960-pmic.c
@@ -386,25 +386,6 @@
 	KEY(0, 3, KEY_CAMERA_FOCUS),
 };
 
-static struct matrix_keymap_data keymap_data_sim = {
-	.keymap_size    = ARRAY_SIZE(keymap_sim),
-	.keymap         = keymap_sim,
-};
-
-static struct pm8xxx_keypad_platform_data keypad_data_sim = {
-	.input_name             = "keypad_8960",
-	.input_phys_device      = "keypad_8960/input0",
-	.num_rows               = 12,
-	.num_cols               = 8,
-	.rows_gpio_start	= PM8921_GPIO_PM_TO_SYS(9),
-	.cols_gpio_start	= PM8921_GPIO_PM_TO_SYS(1),
-	.debounce_ms            = 15,
-	.scan_delay_ms          = 32,
-	.row_hold_ns            = 91500,
-	.wakeup                 = 1,
-	.keymap_data            = &keymap_data_sim,
-};
-
 static int pm8921_therm_mitigation[] = {
 	1100,
 	700,
@@ -613,10 +594,6 @@
 				&msm8960_ssbi_pm8921_pdata;
 	pm8921_platform_data.num_regulators = msm_pm8921_regulator_pdata_len;
 
-	/* Simulator supports a QWERTY keypad */
-	if (machine_is_msm8960_sim())
-		pm8921_platform_data.keypad_pdata = &keypad_data_sim;
-
 	if (machine_is_msm8960_liquid()) {
 		pm8921_platform_data.keypad_pdata = &keypad_data_liquid;
 		pm8921_platform_data.leds_pdata = &pm8xxx_leds_pdata_liquid;
diff --git a/arch/arm/mach-msm/board-8960.c b/arch/arm/mach-msm/board-8960.c
index d56bdbd..63eef4a 100644
--- a/arch/arm/mach-msm/board-8960.c
+++ b/arch/arm/mach-msm/board-8960.c
@@ -1407,11 +1407,18 @@
 	msm_bus_rpm_set_mt_mask();
 	msm_bus_8960_apps_fabric_pdata.rpm_enabled = 1;
 	msm_bus_8960_sys_fabric_pdata.rpm_enabled = 1;
-	msm_bus_8960_mm_fabric_pdata.rpm_enabled = 1;
 	msm_bus_apps_fabric.dev.platform_data =
 		&msm_bus_8960_apps_fabric_pdata;
 	msm_bus_sys_fabric.dev.platform_data = &msm_bus_8960_sys_fabric_pdata;
-	msm_bus_mm_fabric.dev.platform_data = &msm_bus_8960_mm_fabric_pdata;
+	if (cpu_is_msm8960ab()) {
+		msm_bus_8960_sg_mm_fabric_pdata.rpm_enabled = 1;
+		msm_bus_mm_fabric.dev.platform_data =
+			&msm_bus_8960_sg_mm_fabric_pdata;
+	} else {
+		msm_bus_8960_mm_fabric_pdata.rpm_enabled = 1;
+		msm_bus_mm_fabric.dev.platform_data =
+			&msm_bus_8960_mm_fabric_pdata;
+	}
 	msm_bus_sys_fpb.dev.platform_data = &msm_bus_8960_sys_fpb_pdata;
 	msm_bus_cpss_fpb.dev.platform_data = &msm_bus_8960_cpss_fpb_pdata;
 #endif
@@ -2375,10 +2382,10 @@
 
 static struct msm_thermal_data msm_thermal_pdata = {
 	.sensor_id = 0,
-	.poll_ms = 1000,
-	.limit_temp = 60,
-	.temp_hysteresis = 10,
-	.limit_freq = 918000,
+	.poll_ms = 250,
+	.limit_temp_degC = 60,
+	.temp_hysteresis_degC = 10,
+	.freq_step = 2,
 };
 
 #ifdef CONFIG_MSM_FAKE_BATTERY
@@ -2576,67 +2583,6 @@
 	&msm_tsens_device,
 };
 
-static struct platform_device *sim_devices[] __initdata = {
-	&msm8960_device_uart_gsbi5,
-	&msm8960_device_otg,
-	&msm8960_device_gadget_peripheral,
-	&msm_device_hsusb_host,
-	&msm_device_hsic_host,
-	&android_usb_device,
-	&msm_device_vidc,
-	&msm_bus_apps_fabric,
-	&msm_bus_sys_fabric,
-	&msm_bus_mm_fabric,
-	&msm_bus_sys_fpb,
-	&msm_bus_cpss_fpb,
-	&msm_pcm,
-	&msm_multi_ch_pcm,
-	&msm_pcm_routing,
-	&msm_cpudai0,
-	&msm_cpudai1,
-	&msm8960_cpudai_slimbus_2_rx,
-	&msm8960_cpudai_slimbus_2_tx,
-	&msm_cpudai_hdmi_rx,
-	&msm_cpudai_bt_rx,
-	&msm_cpudai_bt_tx,
-	&msm_cpudai_fm_rx,
-	&msm_cpudai_fm_tx,
-	&msm_cpudai_auxpcm_rx,
-	&msm_cpudai_auxpcm_tx,
-	&msm_cpu_fe,
-	&msm_stub_codec,
-	&msm_voice,
-	&msm_voip,
-	&msm_lpa_pcm,
-	&msm_compr_dsp,
-	&msm_cpudai_incall_music_rx,
-	&msm_cpudai_incall_record_rx,
-	&msm_cpudai_incall_record_tx,
-
-#if defined(CONFIG_CRYPTO_DEV_QCRYPTO) || \
-		defined(CONFIG_CRYPTO_DEV_QCRYPTO_MODULE)
-	&qcrypto_device,
-#endif
-
-#if defined(CONFIG_CRYPTO_DEV_QCEDEV) || \
-		defined(CONFIG_CRYPTO_DEV_QCEDEV_MODULE)
-	&qcedev_device,
-#endif
-};
-
-static struct platform_device *rumi3_devices[] __initdata = {
-	&msm8960_device_uart_gsbi5,
-	&msm_kgsl_3d0,
-	&msm_kgsl_2d0,
-	&msm_kgsl_2d1,
-#ifdef CONFIG_MSM_GEMINI
-	&msm8960_gemini_device,
-#endif
-#ifdef CONFIG_MSM_MERCURY
-	&msm8960_mercury_device,
-#endif
-};
-
 static struct platform_device *cdp_devices[] __initdata = {
 	&msm_8960_q6_lpass,
 	&msm_8960_q6_mss_fw,
@@ -2994,10 +2940,6 @@
 	/* Build the matching 'supported_machs' bitmask */
 	if (machine_is_msm8960_cdp())
 		mach_mask = I2C_SURF;
-	else if (machine_is_msm8960_rumi3())
-		mach_mask = I2C_RUMI;
-	else if (machine_is_msm8960_sim())
-		mach_mask = I2C_SIM;
 	else if (machine_is_msm8960_fluid())
 		mach_mask = I2C_FLUID;
 	else if (machine_is_msm8960_liquid())
@@ -3036,71 +2978,6 @@
 #endif
 }
 
-static void __init msm8960_sim_init(void)
-{
-	struct msm_watchdog_pdata *wdog_pdata = (struct msm_watchdog_pdata *)
-		&msm8960_device_watchdog.dev.platform_data;
-
-	wdog_pdata->bark_time = 15000;
-	msm_tsens_early_init(&msm_tsens_pdata);
-	msm_thermal_init(&msm_thermal_pdata);
-	BUG_ON(msm_rpm_init(&msm8960_rpm_data));
-	BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
-	regulator_suppress_info_printing();
-	platform_device_register(&msm8960_device_rpm_regulator);
-	msm_clock_init(&msm8960_clock_init_data);
-	msm8960_init_pmic();
-
-	msm8960_device_otg.dev.platform_data = &msm_otg_pdata;
-	msm8960_init_gpiomux();
-	msm8960_i2c_init();
-	msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
-	msm_spm_l2_init(msm_spm_l2_data);
-	msm8960_init_buses();
-	platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
-	msm8960_pm8921_gpio_mpp_init();
-	platform_add_devices(sim_devices, ARRAY_SIZE(sim_devices));
-
-	msm8960_device_qup_spi_gsbi1.dev.platform_data =
-				&msm8960_qup_spi_gsbi1_pdata;
-	spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
-
-	msm8960_init_mmc();
-	msm8960_init_fb();
-	slim_register_board_info(msm_slim_devices,
-		ARRAY_SIZE(msm_slim_devices));
-	BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
-}
-
-static void __init msm8960_rumi3_init(void)
-{
-	msm_tsens_early_init(&msm_tsens_pdata);
-	msm_thermal_init(&msm_thermal_pdata);
-	BUG_ON(msm_rpm_init(&msm8960_rpm_data));
-	BUG_ON(msm_rpmrs_levels_init(&msm_rpmrs_data));
-	regulator_suppress_info_printing();
-	platform_device_register(&msm8960_device_rpm_regulator);
-	msm8960_init_gpiomux();
-	msm8960_init_pmic();
-	msm8960_device_qup_spi_gsbi1.dev.platform_data =
-				&msm8960_qup_spi_gsbi1_pdata;
-	spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
-	msm8960_i2c_init();
-	msm_spm_init(msm_spm_data, ARRAY_SIZE(msm_spm_data));
-	msm_spm_l2_init(msm_spm_l2_data);
-	platform_add_devices(common_devices, ARRAY_SIZE(common_devices));
-	msm8960_pm8921_gpio_mpp_init();
-	platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
-	msm8960_init_mmc();
-	register_i2c_devices();
-
-
-	msm8960_init_fb();
-	slim_register_board_info(msm_slim_devices,
-		ARRAY_SIZE(msm_slim_devices));
-	BUG_ON(msm_pm_boot_init(&msm_pm_boot_pdata));
-}
-
 static void __init msm8960_cdp_init(void)
 {
 	if (meminfo_init(SYS_MEMORY, SZ_256M) < 0)
@@ -3193,30 +3070,6 @@
 	}
 }
 
-MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
-	.map_io = msm8960_map_io,
-	.reserve = msm8960_reserve,
-	.init_irq = msm8960_init_irq,
-	.handle_irq = gic_handle_irq,
-	.timer = &msm_timer,
-	.init_machine = msm8960_sim_init,
-	.init_early = msm8960_allocate_memory_regions,
-	.init_very_early = msm8960_early_memory,
-	.restart = msm_restart,
-MACHINE_END
-
-MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
-	.map_io = msm8960_map_io,
-	.reserve = msm8960_reserve,
-	.init_irq = msm8960_init_irq,
-	.handle_irq = gic_handle_irq,
-	.timer = &msm_timer,
-	.init_machine = msm8960_rumi3_init,
-	.init_early = msm8960_allocate_memory_regions,
-	.init_very_early = msm8960_early_memory,
-	.restart = msm_restart,
-MACHINE_END
-
 MACHINE_START(MSM8960_CDP, "QCT MSM8960 CDP")
 	.map_io = msm8960_map_io,
 	.reserve = msm8960_reserve,
diff --git a/arch/arm/mach-msm/board-8974.c b/arch/arm/mach-msm/board-8974.c
index 30b44bd..d65ebe1 100644
--- a/arch/arm/mach-msm/board-8974.c
+++ b/arch/arm/mach-msm/board-8974.c
@@ -59,7 +59,7 @@
 #endif
 #define MSM_ION_MM_FW_SIZE	0xa00000 /* (10MB) */
 #define MSM_ION_MM_SIZE		0x7800000 /* (120MB) */
-#define MSM_ION_QSECOM_SIZE	0x100000 /* (1MB) */
+#define MSM_ION_QSECOM_SIZE	0x600000 /* (6MB) */
 #define MSM_ION_MFC_SIZE	SZ_8K
 #define MSM_ION_AUDIO_SIZE	0x2B4000
 #define MSM_ION_HEAP_NUM	8
@@ -656,6 +656,10 @@
 	OF_DEV_AUXDATA("qcom,mdss_mdp", 0xFD900000, "mdp.0", NULL),
 	OF_DEV_AUXDATA("qcom,msm-tsens", 0xFC4A8000, \
 			"msm-tsens", NULL),
+	OF_DEV_AUXDATA("qcom,qcedev", 0xFD440000, \
+			"qcedev.0", NULL),
+	OF_DEV_AUXDATA("qcom,qcrypto", 0xFD440000, \
+			"qcrypto.0", NULL),
 	{}
 };
 
diff --git a/arch/arm/mach-msm/board-msm7627a-camera.c b/arch/arm/mach-msm/board-msm7627a-camera.c
index 57684f9..1e198a7 100644
--- a/arch/arm/mach-msm/board-msm7627a-camera.c
+++ b/arch/arm/mach-msm/board-msm7627a-camera.c
@@ -281,8 +281,15 @@
 	.gpio_no_mux = 1,
 };
 
+static struct msm_camera_sensor_flash_src msm_flash_src_ov8825 = {
+	.flash_sr_type = MSM_CAMERA_FLASH_SRC_LED1,
+	._fsrc.ext_driver_src.led_en = 13,
+	._fsrc.ext_driver_src.led_flash_en = 32,
+};
+
 static struct msm_camera_sensor_flash_data flash_ov8825 = {
-	.flash_type     = MSM_CAMERA_FLASH_NONE,
+	.flash_type     = MSM_CAMERA_FLASH_LED,
+	.flash_src      = &msm_flash_src_ov8825,
 };
 
 static struct msm_camera_sensor_platform_info sensor_board_info_ov8825 = {
diff --git a/arch/arm/mach-msm/board-msm7x27a.c b/arch/arm/mach-msm/board-msm7x27a.c
index ba4e098..3bd7eeb 100644
--- a/arch/arm/mach-msm/board-msm7x27a.c
+++ b/arch/arm/mach-msm/board-msm7x27a.c
@@ -883,6 +883,7 @@
  */
 static struct ion_platform_data ion_pdata = {
 	.nr = MSM_ION_HEAP_NUM,
+	.has_outer_cache = 1,
 	.heaps = {
 		{
 			.id	= ION_SYSTEM_HEAP_ID,
@@ -896,7 +897,6 @@
 			.type	= ION_HEAP_TYPE_CARVEOUT,
 			.name	= ION_CAMERA_HEAP_NAME,
 			.memory_type = ION_EBI_TYPE,
-			.has_outer_cache = 1,
 			.extra_data = (void *)&co_ion_pdata,
 		},
 		/* PMEM_AUDIO */
@@ -905,7 +905,6 @@
 			.type	= ION_HEAP_TYPE_CARVEOUT,
 			.name	= ION_AUDIO_HEAP_NAME,
 			.memory_type = ION_EBI_TYPE,
-			.has_outer_cache = 1,
 			.extra_data = (void *)&co_ion_pdata,
 		},
 		/* PMEM_MDP = SF */
@@ -914,7 +913,6 @@
 			.type	= ION_HEAP_TYPE_CARVEOUT,
 			.name	= ION_SF_HEAP_NAME,
 			.memory_type = ION_EBI_TYPE,
-			.has_outer_cache = 1,
 			.extra_data = (void *)&co_ion_pdata,
 		},
 #endif
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 1b21c23..50b1ac5 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -468,26 +468,6 @@
 	},
 };
 
-/*
- * The smc91x configuration varies depending on platform.
- * The resources data structure is filled in at runtime.
- */
-static struct resource smc91x_resources[] = {
-	[0] = {
-		.flags = IORESOURCE_MEM,
-	},
-	[1] = {
-		.flags = IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device smc91x_device = {
-	.name          = "smc91x",
-	.id            = 0,
-	.num_resources = ARRAY_SIZE(smc91x_resources),
-	.resource      = smc91x_resources,
-};
-
 static struct resource smsc911x_resources[] = {
 	[0] = {
 		.flags = IORESOURCE_MEM,
@@ -4208,72 +4188,6 @@
 	.id = -1,
 };
 
-static struct platform_device *rumi_sim_devices[] __initdata = {
-	&smc91x_device,
-	&msm_device_uart_dm12,
-#ifdef CONFIG_I2C_QUP
-	&msm_gsbi3_qup_i2c_device,
-	&msm_gsbi4_qup_i2c_device,
-	&msm_gsbi7_qup_i2c_device,
-	&msm_gsbi8_qup_i2c_device,
-	&msm_gsbi9_qup_i2c_device,
-	&msm_gsbi12_qup_i2c_device,
-#endif
-#ifdef CONFIG_I2C_SSBI
-	&msm_device_ssbi3,
-#endif
-#ifdef CONFIG_ANDROID_PMEM
-#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
-	&android_pmem_device,
-	&android_pmem_adsp_device,
-	&android_pmem_smipool_device,
-	&android_pmem_audio_device,
-#endif /*CONFIG_MSM_MULTIMEDIA_USE_ION*/
-#endif /*CONFIG_ANDROID_PMEM*/
-#ifdef CONFIG_MSM_ROTATOR
-	&msm_rotator_device,
-#endif
-	&msm_fb_device,
-	&msm_kgsl_3d0,
-	&msm_kgsl_2d0,
-	&msm_kgsl_2d1,
-	&lcdc_samsung_panel_device,
-#ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL
-	&hdmi_msm_device,
-#endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL */
-#ifdef CONFIG_MSM_CAMERA
-#ifndef CONFIG_MSM_CAMERA_V4L2
-#ifdef CONFIG_MT9E013
-	&msm_camera_sensor_mt9e013,
-#endif
-#ifdef CONFIG_IMX074
-	&msm_camera_sensor_imx074,
-#endif
-#ifdef CONFIG_VX6953
-	&msm_camera_sensor_vx6953,
-#endif
-#ifdef CONFIG_WEBCAM_OV7692
-	&msm_camera_sensor_webcam_ov7692,
-#endif
-#ifdef CONFIG_WEBCAM_OV9726
-	&msm_camera_sensor_webcam_ov9726,
-#endif
-#ifdef CONFIG_QS_S5K4E1
-	&msm_camera_sensor_qs_s5k4e1,
-#endif
-#endif
-#endif
-#ifdef CONFIG_MSM_GEMINI
-	&msm_gemini_device,
-#endif
-#ifdef CONFIG_MSM_VPE
-#ifndef CONFIG_MSM_CAMERA_V4L2
-	&msm_vpe_device,
-#endif
-#endif
-	&msm_device_vidc,
-};
-
 #if defined(CONFIG_GPIO_SX150X) || defined(CONFIG_GPIO_SX150X_MODULE)
 enum {
 	SX150X_CORE,
@@ -7452,10 +7366,6 @@
 		mach_mask = I2C_SURF;
 	else if (machine_is_msm8x60_ffa() || machine_is_msm8x60_fusn_ffa())
 		mach_mask = I2C_FFA;
-	else if (machine_is_msm8x60_rumi3())
-		mach_mask = I2C_RUMI;
-	else if (machine_is_msm8x60_sim())
-		mach_mask = I2C_SIM;
 	else if (machine_is_msm8x60_fluid())
 		mach_mask = I2C_FLUID;
 	else if (machine_is_msm8x60_dragon())
@@ -7642,10 +7552,6 @@
 			machine_is_msm8x60_fluid() ||
 			machine_is_msm8x60_dragon())
 			ebi2_cfg |= (1 << 4) | (1 << 5); /* CS2, CS3 */
-		else if (machine_is_msm8x60_sim())
-			ebi2_cfg |= (1 << 4); /* CS2 */
-		else if (machine_is_msm8x60_rumi3())
-			ebi2_cfg |= (1 << 5); /* CS3 */
 
 		writel_relaxed(ebi2_cfg, ebi2_cfg_ptr);
 		iounmap(ebi2_cfg_ptr);
@@ -7686,32 +7592,6 @@
 	}
 }
 
-static void __init msm8x60_configure_smc91x(void)
-{
-	if (machine_is_msm8x60_sim()) {
-
-		smc91x_resources[0].start = 0x1b800300;
-		smc91x_resources[0].end   = 0x1b8003ff;
-
-		smc91x_resources[1].start = (NR_MSM_IRQS + 40);
-		smc91x_resources[1].end   = (NR_MSM_IRQS + 40);
-
-	} else if (machine_is_msm8x60_rumi3()) {
-
-		smc91x_resources[0].start = 0x1d000300;
-		smc91x_resources[0].end   = 0x1d0003ff;
-
-		smc91x_resources[1].start = TLMM_MSM_DIR_CONN_IRQ_0;
-		smc91x_resources[1].end   = TLMM_MSM_DIR_CONN_IRQ_0;
-	}
-}
-
-static void __init msm8x60_init_tlmm(void)
-{
-	if (machine_is_msm8x60_rumi3())
-		msm_gpio_install_direct_irq(0, 0, 1);
-}
-
 #if (defined(CONFIG_MMC_MSM_SDC1_SUPPORT)\
 	|| defined(CONFIG_MMC_MSM_SDC2_SUPPORT)\
 	|| defined(CONFIG_MMC_MSM_SDC3_SUPPORT)\
@@ -9875,10 +9755,7 @@
 	mdp_pdata.num_mdp_clk = 0;
 	mdp_pdata.mdp_core_clk_rate = 200000000;
 #endif
-	if (machine_is_msm8x60_rumi3())
-		msm_fb_register_device("mdp", NULL);
-	else
-		msm_fb_register_device("mdp", &mdp_pdata);
+	msm_fb_register_device("mdp", &mdp_pdata);
 
 	msm_fb_register_device("lcdc", &lcdc_pdata);
 	msm_fb_register_device("mipi_dsi", &mipi_dsi_pdata);
@@ -10300,14 +10177,6 @@
 	struct msm_gpiomux_configs *gpiomux_cfgs;
 };
 
-static struct msm_board_data msm8x60_rumi3_board_data __initdata = {
-	.gpiomux_cfgs = msm8x60_surf_ffa_gpiomux_cfgs,
-};
-
-static struct msm_board_data msm8x60_sim_board_data __initdata = {
-	.gpiomux_cfgs = msm8x60_surf_ffa_gpiomux_cfgs,
-};
-
 static struct msm_board_data msm8x60_surf_board_data __initdata = {
 	.gpiomux_cfgs = msm8x60_surf_ffa_gpiomux_cfgs,
 };
@@ -10418,12 +10287,9 @@
 	 * it disabled for all others for additional power savings.
 	 */
 	if (machine_is_msm8x60_surf() || machine_is_msm8x60_ffa() ||
-			machine_is_msm8x60_rumi3() ||
-			machine_is_msm8x60_sim() ||
 			machine_is_msm8x60_fluid() ||
 			machine_is_msm8x60_dragon())
 		msm8x60_init_ebi2();
-	msm8x60_init_tlmm();
 	msm8x60_init_gpiomux(board_data->gpiomux_cfgs);
 	msm8x60_init_uart12dm();
 #ifdef CONFIG_MSM_CAMERA_V4L2
@@ -10513,10 +10379,6 @@
 #endif
 			platform_add_devices(asoc_devices,
 					ARRAY_SIZE(asoc_devices));
-	} else {
-		msm8x60_configure_smc91x();
-		platform_add_devices(rumi_sim_devices,
-				     ARRAY_SIZE(rumi_sim_devices));
 	}
 #if defined(CONFIG_USB_PEHCI_HCD) || defined(CONFIG_USB_PEHCI_HCD_MODULE)
 	if (machine_is_msm8x60_surf() || machine_is_msm8x60_ffa() ||
@@ -10540,8 +10402,7 @@
 	if (machine_is_msm8x60_fluid())
 		cyttsp_set_params();
 #endif
-	if (!machine_is_msm8x60_sim())
-		msm_fb_add_devices();
+	msm_fb_add_devices();
 	fixup_i2c_configs();
 	register_i2c_devices();
 
@@ -10608,16 +10469,6 @@
 		msm_fusion_setup_pinctrl();
 }
 
-static void __init msm8x60_rumi3_init(void)
-{
-	msm8x60_init(&msm8x60_rumi3_board_data);
-}
-
-static void __init msm8x60_sim_init(void)
-{
-	msm8x60_init(&msm8x60_sim_board_data);
-}
-
 static void __init msm8x60_surf_init(void)
 {
 	msm8x60_init(&msm8x60_surf_board_data);
@@ -10653,28 +10504,6 @@
 	msm8x60_init(&msm8x60_dragon_board_data);
 }
 
-MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
-	.map_io = msm8x60_map_io,
-	.reserve = msm8x60_reserve,
-	.init_irq = msm8x60_init_irq,
-	.handle_irq = gic_handle_irq,
-	.init_machine = msm8x60_rumi3_init,
-	.timer = &msm_timer,
-	.init_early = msm8x60_charm_init_early,
-	.restart = msm_restart,
-MACHINE_END
-
-MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
-	.map_io = msm8x60_map_io,
-	.reserve = msm8x60_reserve,
-	.init_irq = msm8x60_init_irq,
-	.handle_irq = gic_handle_irq,
-	.init_machine = msm8x60_sim_init,
-	.timer = &msm_timer,
-	.init_early = msm8x60_charm_init_early,
-	.restart = msm_restart,
-MACHINE_END
-
 MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
 	.map_io = msm8x60_map_io,
 	.reserve = msm8x60_reserve,
diff --git a/arch/arm/mach-msm/board-qrd7627a.c b/arch/arm/mach-msm/board-qrd7627a.c
index a694557..1921cc3 100644
--- a/arch/arm/mach-msm/board-qrd7627a.c
+++ b/arch/arm/mach-msm/board-qrd7627a.c
@@ -772,6 +772,7 @@
  */
 static struct ion_platform_data ion_pdata = {
 	.nr = MSM_ION_HEAP_NUM,
+	.has_outer_cache = 1,
 	.heaps = {
 		{
 			.id	= ION_SYSTEM_HEAP_ID,
@@ -786,7 +787,6 @@
 			.name	= ION_CAMERA_HEAP_NAME,
 			.size	= MSM_ION_CAMERA_SIZE,
 			.memory_type = ION_EBI_TYPE,
-			.has_outer_cache = 1,
 			.extra_data = (void *)&co_ion_pdata,
 		},
 		/* PMEM_AUDIO */
@@ -796,7 +796,6 @@
 			.name	= ION_AUDIO_HEAP_NAME,
 			.size	= MSM_ION_AUDIO_SIZE,
 			.memory_type = ION_EBI_TYPE,
-			.has_outer_cache = 1,
 			.extra_data = (void *)&co_ion_pdata,
 		},
 		/* PMEM_MDP = SF */
@@ -806,7 +805,6 @@
 			.name	= ION_SF_HEAP_NAME,
 			.size	= MSM_ION_SF_SIZE,
 			.memory_type = ION_EBI_TYPE,
-			.has_outer_cache = 1,
 			.extra_data = (void *)&co_ion_pdata,
 		},
 #endif
diff --git a/arch/arm/mach-msm/clock-8974.c b/arch/arm/mach-msm/clock-8974.c
index 7948143..82b7fd6 100644
--- a/arch/arm/mach-msm/clock-8974.c
+++ b/arch/arm/mach-msm/clock-8974.c
@@ -20,6 +20,7 @@
 #include <linux/clk.h>
 
 #include <mach/clk.h>
+#include <mach/rpm-regulator-smd.h>
 
 #include "clock-local2.h"
 #include "clock-pll.h"
@@ -197,6 +198,7 @@
 #define AHB_CMD_RCGR                   0x5000
 #define AXI_CMD_RCGR                   0x5040
 #define OCMEMNOC_CMD_RCGR              0x5090
+#define OCMEMCX_OCMEMNOC_CBCR          0x4058
 
 #define MMSS_BCR                  0x0240
 #define USB_30_BCR                0x03C0
@@ -580,17 +582,26 @@
 	VDD_DIG_HIGH
 };
 
+static const int vdd_corner[] = {
+	[VDD_DIG_NONE]	  = RPM_REGULATOR_CORNER_NONE,
+	[VDD_DIG_LOW]	  = RPM_REGULATOR_CORNER_SVS_SOC,
+	[VDD_DIG_NOMINAL] = RPM_REGULATOR_CORNER_NORMAL,
+	[VDD_DIG_HIGH]	  = RPM_REGULATOR_CORNER_SUPER_TURBO,
+};
+
+static struct rpm_regulator *vdd_dig_reg;
+
 static int set_vdd_dig(struct clk_vdd_class *vdd_class, int level)
 {
-	/* TODO: Actually call into regulator APIs to set VDD_DIG here. */
-	return 0;
+	return rpm_regulator_set_voltage(vdd_dig_reg, vdd_corner[level],
+					RPM_REGULATOR_CORNER_SUPER_TURBO);
 }
 
 static DEFINE_VDD_CLASS(vdd_dig, set_vdd_dig);
 
-#define RPM_MISC_CLK_TYPE 0x306b6c63
-#define RPM_BUS_CLK_TYPE  0x316b6c63
-#define RPM_MEM_CLK_TYPE  0x326b6c63
+#define RPM_MISC_CLK_TYPE	0x306b6c63
+#define RPM_BUS_CLK_TYPE	0x316b6c63
+#define RPM_MEM_CLK_TYPE	0x326b6c63
 
 #define CXO_ID		0x0
 #define QDSS_ID		0x1
@@ -603,6 +614,14 @@
 #define BIMC_ID		0x0
 #define OCMEM_ID	0x1
 
+enum {
+	D0_ID = 1,
+	D1_ID,
+	A0_ID,
+	A1_ID,
+	A2_ID,
+};
+
 DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_ID, NULL);
 DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_ID, NULL);
 DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_ID, NULL);
@@ -617,6 +636,18 @@
 				RPM_MISC_CLK_TYPE, CXO_ID, 19200000);
 DEFINE_CLK_RPM_SMD_QDSS(qdss_clk, qdss_a_clk, RPM_MISC_CLK_TYPE, QDSS_ID);
 
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_d0, cxo_d0_a, D0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_d1, cxo_d1_a, D1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_a0, cxo_a0_a, A0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_a1, cxo_a1_a, A1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(cxo_a2, cxo_a2_a, A2_ID);
+
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_d0_pin, cxo_d0_a_pin, D0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_d1_pin, cxo_d1_a_pin, D1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a0_pin, cxo_a0_a_pin, A0_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a1_pin, cxo_a1_a_pin, A1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(cxo_a2_pin, cxo_a2_a_pin, A2_ID);
+
 static struct pll_vote_clk gpll0_clk_src = {
 	.en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE_REG,
 	.status_reg = (void __iomem *)GPLL0_STATUS_REG,
@@ -689,7 +720,7 @@
 	.base = &virt_bases[MMSS_BASE],
 	.c = {
 		.dbg_name = "mmpll1_clk_src",
-		.rate = 1000000000,
+		.rate = 846000000,
 		.ops = &clk_ops_pll_vote,
 		.warned = true,
 		CLK_INIT(mmpll1_clk_src.c),
@@ -705,6 +736,7 @@
 		.dbg_name = "mmpll3_clk_src",
 		.rate = 1000000000,
 		.ops = &clk_ops_local_pll,
+		.warned = true,
 		CLK_INIT(mmpll3_clk_src.c),
 	},
 };
@@ -2205,6 +2237,28 @@
 	},
 };
 
+struct branch_clk gcc_mmss_noc_cfg_ahb_clk = {
+	.cbcr_reg = MMSS_NOC_CFG_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_mmss_noc_cfg_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_mmss_noc_cfg_ahb_clk.c),
+	},
+};
+
+struct branch_clk gcc_ocmem_noc_cfg_ahb_clk = {
+	.cbcr_reg = OCMEM_NOC_CFG_AHB_CBCR,
+	.has_sibling = 1,
+	.base = &virt_bases[GCC_BASE],
+	.c = {
+		.dbg_name = "gcc_ocmem_noc_cfg_ahb_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(gcc_ocmem_noc_cfg_ahb_clk.c),
+	},
+};
+
 static struct branch_clk gcc_mss_cfg_ahb_clk = {
 	.cbcr_reg = MSS_CFG_AHB_CBCR,
 	.has_sibling = 1,
@@ -2217,10 +2271,11 @@
 };
 
 static struct clk_freq_tbl ftbl_mmss_axi_clk[] = {
-	F_MM( 19200000,    cxo,   1,   0,   0),
-	F_MM(150000000,  gpll0,   4,   0,   0),
-	F_MM(333330000, mmpll1,   3,   0,   0),
-	F_MM(400000000, mmpll0,   2,   0,   0),
+	F_MM( 19200000,    cxo,     1,   0,   0),
+	F_MM(150000000,  gpll0,     4,   0,   0),
+	F_MM(282000000, mmpll1,     3,   0,   0),
+	F_MM(320000000, mmpll1,   2.5,   0,   0),
+	F_MM(400000000, mmpll0,     2,   0,   0),
 	F_END
 };
 
@@ -2233,8 +2288,8 @@
 	.c = {
 		.dbg_name = "axi_clk_src",
 		.ops = &clk_ops_rcg,
-		VDD_DIG_FMAX_MAP3(LOW, 150000000, NOMINAL, 333330000,
-				  HIGH, 400000000),
+		VDD_DIG_FMAX_MAP3(LOW, 150000000, NOMINAL, 282000000,
+				  HIGH, 320000000),
 		CLK_INIT(axi_clk_src.c),
 	},
 };
@@ -2242,7 +2297,7 @@
 static struct clk_freq_tbl ftbl_ocmemnoc_clk[] = {
 	F_MM( 19200000,    cxo,   1,   0,   0),
 	F_MM(150000000,  gpll0,   4,   0,   0),
-	F_MM(333330000, mmpll1,   3,   0,   0),
+	F_MM(282000000, mmpll1,   3,   0,   0),
 	F_MM(400000000, mmpll0,   2,   0,   0),
 	F_END
 };
@@ -2256,7 +2311,7 @@
 	.c = {
 		.dbg_name = "ocmemnoc_clk_src",
 		.ops = &clk_ops_rcg,
-		VDD_DIG_FMAX_MAP3(LOW, 150000000, NOMINAL, 333330000,
+		VDD_DIG_FMAX_MAP3(LOW, 150000000, NOMINAL, 282000000,
 				  HIGH, 400000000),
 		CLK_INIT(ocmemnoc_clk_src.c),
 	},
@@ -3735,6 +3790,18 @@
 	},
 };
 
+struct branch_clk ocmemcx_ocmemnoc_clk = {
+	.cbcr_reg = OCMEMCX_OCMEMNOC_CBCR,
+	.parent = &ocmemnoc_clk_src.c,
+	.has_sibling = 1,
+	.base = &virt_bases[MMSS_BASE],
+	.c = {
+		.dbg_name = "ocmemcx_ocmemnoc_clk",
+		.ops = &clk_ops_branch,
+		CLK_INIT(ocmemcx_ocmemnoc_clk.c),
+	},
+};
+
 static struct branch_clk venus0_ahb_clk = {
 	.cbcr_reg = VENUS0_AHB_CBCR,
 	.has_sibling = 1,
@@ -4319,6 +4386,8 @@
 	{&gcc_blsp2_uart5_apps_clk.c,		GCC_BASE, 0x00c6},
 	{&gcc_blsp2_uart6_apps_clk.c,		GCC_BASE, 0x00cb},
 	{&gcc_boot_rom_ahb_clk.c,		GCC_BASE, 0x0100},
+	{&gcc_ocmem_noc_cfg_ahb_clk.c,		GCC_BASE, 0x0029},
+	{&gcc_mmss_noc_cfg_ahb_clk.c,		GCC_BASE, 0x002A},
 	{&gcc_mss_cfg_ahb_clk.c,		GCC_BASE, 0x0030},
 	{&gcc_ce1_clk.c,			GCC_BASE, 0x0140},
 	{&gcc_ce2_clk.c,			GCC_BASE, 0x0148},
@@ -4346,6 +4415,7 @@
 	{&mmss_mmssnoc_ahb_clk.c,		MMSS_BASE, 0x0001},
 	{&mmss_mmssnoc_axi_clk.c,		MMSS_BASE, 0x0004},
 	{&ocmemnoc_clk.c,			MMSS_BASE, 0x0007},
+	{&ocmemcx_ocmemnoc_clk.c,		MMSS_BASE, 0x0009},
 	{&camss_cci_cci_ahb_clk.c,		MMSS_BASE, 0x002e},
 	{&camss_cci_cci_clk.c,			MMSS_BASE, 0x002d},
 	{&camss_csi0_ahb_clk.c,			MMSS_BASE, 0x0042},
@@ -4669,6 +4739,16 @@
 	CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, ""),
 	CLK_LOOKUP("bus_clk", gcc_ce2_axi_clk.c, ""),
 
+	CLK_LOOKUP("core_clk",     gcc_ce2_clk.c,         "qcedev.0"),
+	CLK_LOOKUP("iface_clk",    gcc_ce2_ahb_clk.c,     "qcedev.0"),
+	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c,     "qcedev.0"),
+	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,         "qcedev.0"),
+
+	CLK_LOOKUP("core_clk",     gcc_ce2_clk.c,     "qcrypto.0"),
+	CLK_LOOKUP("iface_clk",    gcc_ce2_ahb_clk.c, "qcrypto.0"),
+	CLK_LOOKUP("bus_clk",      gcc_ce2_axi_clk.c, "qcrypto.0"),
+	CLK_LOOKUP("core_clk_src", ce2_clk_src.c,     "qcrypto.0"),
+
 	CLK_LOOKUP("core_clk", gcc_gp1_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_gp2_clk.c, ""),
 	CLK_LOOKUP("core_clk", gcc_gp3_clk.c, ""),
@@ -4765,8 +4845,8 @@
 	CLK_LOOKUP("core_clk", camss_phy1_csi1phytimer_clk.c, ""),
 	CLK_LOOKUP("core_clk", camss_phy2_csi2phytimer_clk.c, ""),
 	CLK_LOOKUP("iface_clk", camss_top_ahb_clk.c, ""),
-	CLK_LOOKUP("iface_clk", camss_vfe_cpp_ahb_clk.c, ""),
-	CLK_LOOKUP("core_clk", camss_vfe_cpp_clk.c, ""),
+	CLK_LOOKUP("iface_clk", camss_vfe_cpp_ahb_clk.c, "fda44000.qcom,iommu"),
+	CLK_LOOKUP("core_clk", camss_vfe_cpp_clk.c, "fda44000.qcom,iommu"),
 	CLK_LOOKUP("camss_vfe_vfe0_clk", camss_vfe_vfe0_clk.c, ""),
 	CLK_LOOKUP("camss_vfe_vfe1_clk", camss_vfe_vfe1_clk.c, ""),
 	CLK_LOOKUP("vfe0_clk_src", vfe0_clk_src.c, ""),
@@ -4780,9 +4860,13 @@
 	CLK_LOOKUP("bus_clk", mdss_axi_clk.c, "mdp.0"),
 	CLK_LOOKUP("core_clk", oxili_gfx3d_clk.c, "fdb00000.qcom,kgsl-3d0"),
 	CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, "fdb00000.qcom,kgsl-3d0"),
+	CLK_LOOKUP("mem_iface_clk", ocmemcx_ocmemnoc_clk.c,
+						"fdb00000.qcom,kgsl-3d0"),
 	CLK_LOOKUP("core_clk", oxilicx_axi_clk.c, "fdb10000.qcom,iommu"),
 	CLK_LOOKUP("iface_clk", oxilicx_ahb_clk.c, "fdb10000.qcom,iommu"),
+	CLK_LOOKUP("alt_core_clk", oxili_gfx3d_clk.c, "fdb10000.qcom,iommu"),
 	CLK_LOOKUP("iface_clk", venus0_ahb_clk.c, "fdc84000.qcom,iommu"),
+	CLK_LOOKUP("alt_core_clk", venus0_vcodec0_clk.c, "fdc84000.qcom,iommu"),
 	CLK_LOOKUP("core_clk", venus0_axi_clk.c, "fdc84000.qcom,iommu"),
 	CLK_LOOKUP("bus_clk", venus0_axi_clk.c, ""),
 	CLK_LOOKUP("src_clk",  vcodec0_clk_src.c, "fdce0000.qcom,venus"),
@@ -4869,6 +4953,8 @@
 	CLK_LOOKUP("bus_a_clk",	ocmemnoc_clk.c,		"msm_ocmem_noc"),
 	CLK_LOOKUP("bus_clk",	mmss_mmssnoc_axi_clk.c,	"msm_mmss_noc"),
 	CLK_LOOKUP("bus_a_clk",	mmss_mmssnoc_axi_clk.c,	"msm_mmss_noc"),
+	CLK_LOOKUP("iface_clk", gcc_mmss_noc_cfg_ahb_clk.c, ""),
+	CLK_LOOKUP("iface_clk", gcc_ocmem_noc_cfg_ahb_clk.c, ""),
 
 	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tmc-etr"),
 	CLK_LOOKUP("core_clk", qdss_clk.c, "coresight-tpiu"),
@@ -4988,9 +5074,9 @@
 
 /* MMPLL1 at 1000 MHz, main output enabled. */
 static struct pll_config mmpll1_config __initdata = {
-	.l = 0x34,
+	.l = 0x2C,
 	.m = 0x1,
-	.n = 0xC,
+	.n = 0x10,
 	.vco_val = 0x0,
 	.vco_mask = BM(21, 20),
 	.pre_div_val = 0x0,
@@ -5093,8 +5179,8 @@
 
 static void __init msm8974_clock_post_init(void)
 {
-	clk_set_rate(&axi_clk_src.c, 333330000);
-	clk_set_rate(&ocmemnoc_clk_src.c, 333330000);
+	clk_set_rate(&axi_clk_src.c, 282000000);
+	clk_set_rate(&ocmemnoc_clk_src.c, 282000000);
 
 	/*
 	 * Hold an active set vote at a rate of 40MHz for the MMSS NOC AHB
@@ -5109,6 +5195,13 @@
 	 */
 	clk_prepare_enable(&cxo_a_clk_src.c);
 
+	/*
+	 * TODO: Temporarily enable NOC configuration AHB clocks. Remove when
+	 * the bus driver is ready.
+	 */
+	clk_prepare_enable(&gcc_mmss_noc_cfg_ahb_clk.c);
+	clk_prepare_enable(&gcc_ocmem_noc_cfg_ahb_clk.c);
+
 	/* Set rates for single-rate clocks. */
 	clk_set_rate(&usb30_master_clk_src.c,
 			usb30_master_clk_src.freq_tbl[0].freq_hz);
@@ -5170,12 +5263,31 @@
 
 	clk_ops_local_pll.enable = msm8974_pll_clk_enable;
 
+	vdd_dig_reg = rpm_regulator_get(NULL, "vdd_dig");
+	if (IS_ERR(vdd_dig_reg))
+		panic("clock-copper: Unable to get the vdd_dig regulator!");
+
+	/*
+	 * TODO: Set a voltage and enable vdd_dig, leaving the voltage high
+	 * until late_init. This may not be necessary with clock handoff;
+	 * Investigate this code on a real non-simulator target to determine
+	 * its necessity.
+	 */
+	vote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
+	rpm_regulator_enable(vdd_dig_reg);
+
 	reg_init();
 }
 
+static int __init msm8974_clock_late_init(void)
+{
+	return unvote_vdd_level(&vdd_dig, VDD_DIG_HIGH);
+}
+
 struct clock_init_data msm8974_clock_init_data __initdata = {
 	.table = msm_clocks_8974,
 	.size = ARRAY_SIZE(msm_clocks_8974),
 	.pre_init = msm8974_clock_pre_init,
 	.post_init = msm8974_clock_post_init,
+	.late_init = msm8974_clock_late_init,
 };
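
For reference on the MMPLL1 reprogramming in this file: assuming the usual
19.2 MHz XO reference and the L + M/N multiplier relation used by these PLLs,
the new l/m/n values work out to the 846 MHz now carried in the .rate field,
and 846/3 = 282 MHz matches the new mmpll1 entries in the AXI and OCMEM NOC
frequency tables. A worked sketch (not part of the patch itself):

	/*   L = 0x2C = 44, M = 0x1, N = 0x10 = 16
	 *   rate = 19.2 MHz * (L + M/N)
	 *        = 19.2 MHz * (44 + 1/16)
	 *        = 19.2 MHz * 44.0625 = 846 MHz
	 * The previous values (L = 0x34 = 52, N = 0xC = 12) correspond to the
	 * 1000 MHz noted in the comment above that struct:
	 *   19.2 MHz * (52 + 1/12) = 1000 MHz
	 */
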
diff --git a/arch/arm/mach-msm/clock-local.c b/arch/arm/mach-msm/clock-local.c
index ca913dc..51e5703d 100644
--- a/arch/arm/mach-msm/clock-local.c
+++ b/arch/arm/mach-msm/clock-local.c
@@ -355,7 +355,7 @@
 {
 	u32 reg_val;
 
-	reg_val = readl_relaxed(b->ctl_reg);
+	reg_val = b->ctl_reg ? readl_relaxed(b->ctl_reg) : 0;
 	if (b->en_mask) {
 		reg_val &= ~(b->en_mask);
 		writel_relaxed(reg_val, b->ctl_reg);
@@ -564,7 +564,7 @@
 	if (!branch_in_hwcg_mode(b)) {
 		b->hwcg_mask = 0;
 		c->flags &= ~CLKFLAG_HWCG;
-		if (readl_relaxed(b->ctl_reg) & b->en_mask)
+		if (b->ctl_reg && readl_relaxed(b->ctl_reg) & b->en_mask)
 			return HANDOFF_ENABLED_CLK;
 	} else {
 		c->flags |= CLKFLAG_HWCG;
diff --git a/arch/arm/mach-msm/clock-pll.c b/arch/arm/mach-msm/clock-pll.c
index 2938135..d5831e2 100644
--- a/arch/arm/mach-msm/clock-pll.c
+++ b/arch/arm/mach-msm/clock-pll.c
@@ -319,6 +319,7 @@
 	{60, 1152000000},
 	{62, 1200000000},
 	{63, 1209600000},
+	{73, 1401600000},
 	{0, 0},
 };
 
diff --git a/arch/arm/mach-msm/clock-rpm.h b/arch/arm/mach-msm/clock-rpm.h
index 22691c5..e203028 100644
--- a/arch/arm/mach-msm/clock-rpm.h
+++ b/arch/arm/mach-msm/clock-rpm.h
@@ -21,6 +21,10 @@
 #define RPM_SMD_KEY_ENABLE	0x62616E45
 #define RPM_SMD_KEY_STATE	0x54415453
 
+#define RPM_CLK_BUFFER_A_REQ			0x616B6C63
+#define RPM_KEY_SOFTWARE_ENABLE			0x6E657773
+#define RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY	0x62636370
+
 struct clk_ops;
 struct clk_rpmrs_data;
 extern struct clk_ops clk_ops_rpm;
@@ -187,5 +191,17 @@
 #define DEFINE_CLK_RPM_SMD_QDSS(name, active, type, r_id) \
 	__DEFINE_CLK_RPM_QDSS(name, active, type, r_id, \
 		0, RPM_SMD_KEY_STATE, &clk_rpmrs_data_smd)
+/*
+ * The RPM XO buffer clock management code aggregates votes for pin-control mode
+ * and software mode separately. A software-enable vote takes priority over
+ * pin-control; if the software-mode aggregation results in a 'disable', the
+ * buffer is left in pin-control mode when a pin-control vote is still in place.
+ */
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER(name, active, r_id) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+			1000, RPM_KEY_SOFTWARE_ENABLE, &clk_rpmrs_data_smd)
 
+#define DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(name, active, r_id) \
+	__DEFINE_CLK_RPM_BRANCH(name, active, RPM_CLK_BUFFER_A_REQ, r_id, 0, \
+	1000, RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY, &clk_rpmrs_data_smd)
 #endif
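
A minimal consumer-side sketch of the two vote types defined above, assuming a
board clock table maps one of the new buffer clocks from clock-8974.c (e.g.
cxo_d0 / cxo_d0_pin) to a hypothetical client device under the names "xo" and
"xo_pin"; the client code and lookup names are illustrative, not part of this
patch.

	struct clk *xo_sw, *xo_pin;

	xo_sw  = clk_get(dev, "xo");		/* software-enable vote (cxo_d0) */
	xo_pin = clk_get(dev, "xo_pin");	/* pin-control vote (cxo_d0_pin) */

	clk_prepare_enable(xo_pin);	/* buffer follows the pin from now on */
	clk_prepare_enable(xo_sw);	/* software vote wins: buffer forced on */

	clk_disable_unprepare(xo_sw);	/* software aggregation now 'disable';
					 * with the pin-control vote still in
					 * place the buffer is left under pin
					 * control, as described above */
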
diff --git a/arch/arm/mach-msm/devices-8064.c b/arch/arm/mach-msm/devices-8064.c
index aa67690..a6f18ba 100644
--- a/arch/arm/mach-msm/devices-8064.c
+++ b/arch/arm/mach-msm/devices-8064.c
@@ -15,6 +15,7 @@
 #include <linux/list.h>
 #include <linux/platform_device.h>
 #include <linux/msm_rotator.h>
+#include <linux/gpio.h>
 #include <linux/clkdev.h>
 #include <linux/dma-mapping.h>
 #include <linux/coresight.h>
@@ -27,6 +28,7 @@
 #include <mach/msm_dsps.h>
 #include <sound/msm-dai-q6.h>
 #include <sound/apr_audio.h>
+#include <mach/msm_tsif.h>
 #include <mach/msm_bus_board.h>
 #include <mach/rpm.h>
 #include <mach/mdm2.h>
@@ -464,6 +466,112 @@
 	.id     = 0x4009,
 };
 
+#define MSM_TSIF0_PHYS       (0x18200000)
+#define MSM_TSIF1_PHYS       (0x18201000)
+#define MSM_TSIF_SIZE        (0x200)
+
+#define TSIF_0_CLK       GPIO_CFG(55, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_0_EN        GPIO_CFG(56, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_0_DATA      GPIO_CFG(57, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_0_SYNC      GPIO_CFG(62, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_CLK       GPIO_CFG(59, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_EN        GPIO_CFG(60, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_DATA      GPIO_CFG(61, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+#define TSIF_1_SYNC      GPIO_CFG(58, 1, GPIO_CFG_INPUT, \
+	GPIO_CFG_PULL_DOWN, GPIO_CFG_2MA)
+
+static const struct msm_gpio tsif0_gpios[] = {
+	{ .gpio_cfg = TSIF_0_CLK,  .label =  "tsif_clk", },
+	{ .gpio_cfg = TSIF_0_EN,   .label =  "tsif_en", },
+	{ .gpio_cfg = TSIF_0_DATA, .label =  "tsif_data", },
+	{ .gpio_cfg = TSIF_0_SYNC, .label =  "tsif_sync", },
+};
+
+static const struct msm_gpio tsif1_gpios[] = {
+	{ .gpio_cfg = TSIF_1_CLK,  .label =  "tsif_clk", },
+	{ .gpio_cfg = TSIF_1_EN,   .label =  "tsif_en", },
+	{ .gpio_cfg = TSIF_1_DATA, .label =  "tsif_data", },
+	{ .gpio_cfg = TSIF_1_SYNC, .label =  "tsif_sync", },
+};
+
+struct msm_tsif_platform_data tsif1_8064_platform_data = {
+	.num_gpios = ARRAY_SIZE(tsif1_gpios),
+	.gpios = tsif1_gpios,
+	.tsif_pclk = "iface_clk",
+	.tsif_ref_clk = "ref_clk",
+};
+
+struct resource tsif1_8064_resources[] = {
+	[0] = {
+		.flags = IORESOURCE_IRQ,
+		.start = TSIF2_IRQ,
+		.end   = TSIF2_IRQ,
+	},
+	[1] = {
+		.flags = IORESOURCE_MEM,
+		.start = MSM_TSIF1_PHYS,
+		.end   = MSM_TSIF1_PHYS + MSM_TSIF_SIZE - 1,
+	},
+	[2] = {
+		.flags = IORESOURCE_DMA,
+		.start = DMOV8064_TSIF_CHAN,
+		.end   = DMOV8064_TSIF_CRCI,
+	},
+};
+
+struct msm_tsif_platform_data tsif0_8064_platform_data = {
+	.num_gpios = ARRAY_SIZE(tsif0_gpios),
+	.gpios = tsif0_gpios,
+	.tsif_pclk = "iface_clk",
+	.tsif_ref_clk = "ref_clk",
+};
+
+struct resource tsif0_8064_resources[] = {
+	[0] = {
+		.flags = IORESOURCE_IRQ,
+		.start = TSIF1_IRQ,
+		.end   = TSIF1_IRQ,
+	},
+	[1] = {
+		.flags = IORESOURCE_MEM,
+		.start = MSM_TSIF0_PHYS,
+		.end   = MSM_TSIF0_PHYS + MSM_TSIF_SIZE - 1,
+	},
+	[2] = {
+		.flags = IORESOURCE_DMA,
+		.start = DMOV_TSIF_CHAN,
+		.end   = DMOV_TSIF_CRCI,
+	},
+};
+
+struct platform_device msm_8064_device_tsif[2] = {
+	{
+		.name          = "msm_tsif",
+		.id            = 0,
+		.num_resources = ARRAY_SIZE(tsif0_8064_resources),
+		.resource      = tsif0_8064_resources,
+		.dev = {
+			.platform_data = &tsif0_8064_platform_data
+		},
+	},
+	{
+		.name          = "msm_tsif",
+		.id            = 1,
+		.num_resources = ARRAY_SIZE(tsif1_8064_resources),
+		.resource      = tsif1_8064_resources,
+		.dev = {
+			.platform_data = &tsif1_8064_platform_data
+		},
+	}
+};
+
 /*
  * Machine specific data for AUX PCM Interface
 * which the driver will be unaware of.
@@ -2236,6 +2344,7 @@
 	RIVA_APPS_WLAN_SMSM_IRQ,
 	RIVA_APPS_WLAN_RX_DATA_AVAIL_IRQ,
 	RIVA_APPS_WLAN_DATA_XFER_DONE_IRQ,
+	PM8821_SEC_IRQ_N,
 };
 
 struct msm_mpm_device_data apq8064_mpm_dev_data __initdata = {
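
The msm_8064_device_tsif devices defined above still have to be registered from
a board file; a minimal sketch of that registration (assumed here, not part of
this patch) would be:

	static struct platform_device *apq8064_tsif_devices[] __initdata = {
		&msm_8064_device_tsif[0],
		&msm_8064_device_tsif[1],
	};

	platform_add_devices(apq8064_tsif_devices,
			ARRAY_SIZE(apq8064_tsif_devices));
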
diff --git a/arch/arm/mach-msm/devices-8930.c b/arch/arm/mach-msm/devices-8930.c
index 1f954c8..7cb6e95 100644
--- a/arch/arm/mach-msm/devices-8930.c
+++ b/arch/arm/mach-msm/devices-8930.c
@@ -30,6 +30,7 @@
 #include "devices.h"
 #include "rpm_log.h"
 #include "rpm_stats.h"
+#include "rpm_rbcpr_stats.h"
 #include "footswitch.h"
 
 #ifdef CONFIG_MSM_MPM
@@ -287,6 +288,31 @@
 	},
 };
 
+static struct resource msm_rpm_rbcpr_resource = {
+	.start = 0x0010CB00,
+	.end = 0x0010CB00 + SZ_8K - 1,
+	.flags = IORESOURCE_MEM,
+};
+
+static struct msm_rpmrbcpr_platform_data msm_rpm_rbcpr_pdata = {
+	.rbcpr_data = {
+		.upside_steps = 1,
+		.downside_steps = 2,
+		.svs_voltage = 1050000,
+		.nominal_voltage = 1162500,
+		.turbo_voltage = 1287500,
+	},
+};
+
+struct platform_device msm8930_rpm_rbcpr_device = {
+	.name = "msm_rpm_rbcpr",
+	.id = -1,
+	.dev = {
+		.platform_data = &msm_rpm_rbcpr_pdata,
+	},
+	.resource = &msm_rpm_rbcpr_resource,
+};
+
 static int msm8930_LPM_latency = 1000; /* >100 usec for WFI */
 
 struct platform_device msm8930_cpu_idle_device = {
@@ -365,6 +391,11 @@
 	.id		= -1,
 };
 
+struct platform_device msm8930aa_device_acpuclk = {
+	.name		= "acpuclk-8930aa",
+	.id		= -1,
+};
+
 static struct fs_driver_data gfx3d_fs_data = {
 	.clks = (struct fs_clk_data[]){
 		{ .name = "core_clk", .reset_rate = 27000000 },
@@ -636,6 +667,58 @@
 		.ib  = 10000000,
 	},
 };
+static struct msm_bus_vectors vidc_venc_1080p_turbo_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 222298112,
+		.ib  = 3522000000U,
+	},
+	{
+		.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 330301440,
+		.ib  = 3522000000U,
+	},
+	{
+		.src = MSM_BUS_MASTER_AMPSS_M0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 2500000,
+		.ib  = 700000000,
+	},
+	{
+		.src = MSM_BUS_MASTER_AMPSS_M0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 2500000,
+		.ib  = 10000000,
+	},
+};
+static struct msm_bus_vectors vidc_vdec_1080p_turbo_vectors[] = {
+	{
+		.src = MSM_BUS_MASTER_HD_CODEC_PORT0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 222298112,
+		.ib  = 3522000000U,
+	},
+	{
+		.src = MSM_BUS_MASTER_HD_CODEC_PORT1,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 330301440,
+		.ib  = 3522000000U,
+	},
+	{
+		.src = MSM_BUS_MASTER_AMPSS_M0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 2500000,
+		.ib  = 700000000,
+	},
+	{
+		.src = MSM_BUS_MASTER_AMPSS_M0,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 2500000,
+		.ib  = 10000000,
+	},
+};
 
 static struct msm_bus_paths vidc_bus_client_config[] = {
 	{
@@ -666,6 +749,14 @@
 		ARRAY_SIZE(vidc_vdec_1080p_vectors),
 		vidc_vdec_1080p_vectors,
 	},
+	{
+		ARRAY_SIZE(vidc_venc_1080p_turbo_vectors),
+		vidc_venc_1080p_turbo_vectors,
+	},
+	{
+		ARRAY_SIZE(vidc_vdec_1080p_turbo_vectors),
+		vidc_vdec_1080p_turbo_vectors,
+	},
 };
 
 static struct msm_bus_scale_pdata vidc_bus_client_data = {
diff --git a/arch/arm/mach-msm/devices-8960.c b/arch/arm/mach-msm/devices-8960.c
index 9a0c2d7..b1ebe33 100644
--- a/arch/arm/mach-msm/devices-8960.c
+++ b/arch/arm/mach-msm/devices-8960.c
@@ -1788,8 +1788,8 @@
 struct msm_tsif_platform_data tsif1_platform_data = {
 	.num_gpios = ARRAY_SIZE(tsif1_gpios),
 	.gpios = tsif1_gpios,
-	.tsif_pclk = "tsif_pclk",
-	.tsif_ref_clk = "tsif_ref_clk",
+	.tsif_pclk = "iface_clk",
+	.tsif_ref_clk = "ref_clk",
 };
 
 struct resource tsif1_resources[] = {
@@ -1813,8 +1813,8 @@
 struct msm_tsif_platform_data tsif0_platform_data = {
 	.num_gpios = ARRAY_SIZE(tsif0_gpios),
 	.gpios = tsif0_gpios,
-	.tsif_pclk = "tsif_pclk",
-	.tsif_ref_clk = "tsif_ref_clk",
+	.tsif_pclk = "iface_clk",
+	.tsif_ref_clk = "ref_clk",
 };
 struct resource tsif0_resources[] = {
 	[0] = {
diff --git a/arch/arm/mach-msm/devices-msm7x27a.c b/arch/arm/mach-msm/devices-msm7x27a.c
index 8912e96..96984fb 100644
--- a/arch/arm/mach-msm/devices-msm7x27a.c
+++ b/arch/arm/mach-msm/devices-msm7x27a.c
@@ -236,12 +236,22 @@
 	.max_speed_delta_khz = 604800,
 };
 
+static struct acpuclk_pdata msm8625ab_acpuclk_pdata = {
+	.max_speed_delta_khz = 801600,
+};
+
 struct platform_device msm8625_device_acpuclk = {
 	.name		= "acpuclk-7627",
 	.id		= -1,
 	.dev.platform_data = &msm8625_acpuclk_pdata,
 };
 
+struct platform_device msm8625ab_device_acpuclk = {
+	.name		= "acpuclk-7627",
+	.id		= -1,
+	.dev.platform_data = &msm8625ab_acpuclk_pdata,
+};
+
 struct platform_device msm_device_smd = {
 	.name	= "msm_smd",
 	.id	= -1,
@@ -1623,6 +1633,7 @@
 enum {
 	MSM8625,
 	MSM8625A,
+	MSM8625AB,
 };
 
 static int __init msm8625_cpu_id(void)
@@ -1643,6 +1654,11 @@
 	case 0x781:
 		cpu = MSM8625A;
 		break;
+	case 0x775:
+	case 0x776:
+	case 0x782:
+		cpu = MSM8625AB;
+		break;
 	default:
 		pr_err("Invalid Raw ID\n");
 		return -ENODEV;
@@ -1665,10 +1681,11 @@
 			platform_device_register(&msm7x27aa_device_acpuclk);
 		else if (msm8625_cpu_id() == MSM8625A)
 			platform_device_register(&msm8625_device_acpuclk);
+		else if (msm8625_cpu_id() == MSM8625AB)
+			platform_device_register(&msm8625ab_device_acpuclk);
 	} else {
 		platform_device_register(&msm7x27a_device_acpuclk);
 	}
-
 	return 0;
 }
 
diff --git a/arch/arm/mach-msm/devices-msm7x2xa.h b/arch/arm/mach-msm/devices-msm7x2xa.h
index 4184a86..8febe26 100644
--- a/arch/arm/mach-msm/devices-msm7x2xa.h
+++ b/arch/arm/mach-msm/devices-msm7x2xa.h
@@ -33,4 +33,5 @@
 void __init msm8x25_spm_device_init(void);
 void __init msm8x25_kgsl_3d0_init(void);
 void __iomem *core1_reset_base(void);
+extern void setup_mm_for_reboot(void);
 #endif
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index daf70a8..886a9ae 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -189,6 +189,7 @@
 #endif
 
 extern struct platform_device msm_device_tsif[2];
+extern struct platform_device msm_8064_device_tsif[2];
 
 extern struct platform_device msm_device_ssbi_pmic1;
 extern struct platform_device msm_device_ssbi_pmic2;
@@ -328,6 +329,7 @@
 extern struct platform_device msm8930_rpm_device;
 extern struct platform_device msm8930_rpm_stat_device;
 extern struct platform_device msm8930_rpm_log_device;
+extern struct platform_device msm8930_rpm_rbcpr_device;
 
 extern struct platform_device msm8660_rpm_device;
 extern struct platform_device msm8660_rpm_stat_device;
@@ -423,8 +425,10 @@
 extern struct platform_device apq8064_device_acpuclk;
 extern struct platform_device msm8625_device_acpuclk;
 extern struct platform_device msm8627_device_acpuclk;
+extern struct platform_device msm8625ab_device_acpuclk;
 extern struct platform_device msm8x50_device_acpuclk;
 extern struct platform_device msm8x60_device_acpuclk;
 extern struct platform_device msm8930_device_acpuclk;
+extern struct platform_device msm8930aa_device_acpuclk;
 extern struct platform_device msm8960_device_acpuclk;
 extern struct platform_device msm9615_device_acpuclk;
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index f21d296..1770b97 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -184,10 +184,11 @@
 	REG_LDO,
 	REG_VS,
 	REG_GPIO,
+	REG_MAX
 };
 
 struct camera_vreg_t {
-	char *reg_name;
+	const char *reg_name;
 	enum camera_vreg_type type;
 	int min_voltage;
 	int max_voltage;
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
index 43d707e..a51a6b5 100644
--- a/arch/arm/mach-msm/include/mach/camera.h
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -85,13 +85,12 @@
 	VFE_MSG_SYNC_TIMER1,
 	VFE_MSG_SYNC_TIMER2,
 	VFE_MSG_COMMON,
-	VFE_MSG_V32_START,
-	VFE_MSG_V32_START_RECORDING, /* 20 */
-	VFE_MSG_V32_CAPTURE,
-	VFE_MSG_V32_JPEG_CAPTURE,
+	VFE_MSG_START,
+	VFE_MSG_START_RECORDING, /* 20 */
+	VFE_MSG_CAPTURE,
+	VFE_MSG_JPEG_CAPTURE,
 	VFE_MSG_OUTPUT_IRQ,
-	VFE_MSG_V2X_PREVIEW,
-	VFE_MSG_V2X_CAPTURE,
+	VFE_MSG_PREVIEW,
 	VFE_MSG_OUTPUT_PRIMARY,
 	VFE_MSG_OUTPUT_SECONDARY,
 	VFE_MSG_OUTPUT_TERTIARY1,
@@ -291,6 +290,109 @@
 	int (*strobe_flash_charge)(int32_t, int32_t, uint32_t);
 };
 
+enum cci_i2c_master_t {
+	MASTER_0,
+	MASTER_1,
+};
+
+enum cci_i2c_queue_t {
+	QUEUE_0,
+	QUEUE_1,
+};
+
+struct msm_camera_cci_client {
+	struct v4l2_subdev *cci_subdev;
+	uint32_t freq;
+	enum cci_i2c_master_t cci_i2c_master;
+	uint16_t sid;
+	uint16_t cid;
+	uint32_t timeout;
+	uint16_t retries;
+	uint16_t id_map;
+};
+
+enum msm_cci_cmd_type {
+	MSM_CCI_INIT,
+	MSM_CCI_RELEASE,
+	MSM_CCI_SET_SID,
+	MSM_CCI_SET_FREQ,
+	MSM_CCI_SET_SYNC_CID,
+	MSM_CCI_I2C_READ,
+	MSM_CCI_I2C_WRITE,
+	MSM_CCI_GPIO_WRITE,
+};
+
+struct msm_camera_cci_wait_sync_cfg {
+	uint16_t line;
+	uint16_t delay;
+};
+
+struct msm_camera_cci_gpio_cfg {
+	uint16_t gpio_queue;
+	uint16_t i2c_queue;
+};
+
+enum msm_camera_i2c_reg_addr_type {
+	MSM_CAMERA_I2C_BYTE_ADDR = 1,
+	MSM_CAMERA_I2C_WORD_ADDR,
+};
+
+enum msm_camera_i2c_data_type {
+	MSM_CAMERA_I2C_BYTE_DATA = 1,
+	MSM_CAMERA_I2C_WORD_DATA,
+	MSM_CAMERA_I2C_SET_BYTE_MASK,
+	MSM_CAMERA_I2C_UNSET_BYTE_MASK,
+	MSM_CAMERA_I2C_SET_WORD_MASK,
+	MSM_CAMERA_I2C_UNSET_WORD_MASK,
+	MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA,
+};
+
+enum msm_camera_i2c_cmd_type {
+	MSM_CAMERA_I2C_CMD_WRITE,
+	MSM_CAMERA_I2C_CMD_POLL,
+};
+
+struct msm_camera_i2c_reg_conf {
+	uint16_t reg_addr;
+	uint16_t reg_data;
+	enum msm_camera_i2c_data_type dt;
+	enum msm_camera_i2c_cmd_type cmd_type;
+	int16_t mask;
+};
+
+struct msm_camera_cci_i2c_write_cfg {
+	struct msm_camera_i2c_reg_conf *reg_conf_tbl;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	enum msm_camera_i2c_data_type data_type;
+	uint16_t size;
+};
+
+struct msm_camera_cci_i2c_read_cfg {
+	uint16_t addr;
+	enum msm_camera_i2c_reg_addr_type addr_type;
+	uint8_t *data;
+	uint16_t num_byte;
+};
+
+struct msm_camera_cci_i2c_queue_info {
+	uint32_t max_queue_size;
+	uint32_t report_id;
+	uint32_t irq_en;
+	uint32_t capture_rep_data;
+};
+
+struct msm_camera_cci_ctrl {
+	int32_t status;
+	struct msm_camera_cci_client *cci_info;
+	enum msm_cci_cmd_type cmd;
+	union {
+		struct msm_camera_cci_i2c_write_cfg cci_i2c_write_cfg;
+		struct msm_camera_cci_i2c_read_cfg cci_i2c_read_cfg;
+		struct msm_camera_cci_wait_sync_cfg cci_wait_sync_cfg;
+		struct msm_camera_cci_gpio_cfg gpio_cfg;
+	} cfg;
+};
+
 /* this structure is used in kernel */
 struct msm_queue_cmd {
 	struct list_head list_config;
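
To make the new CCI types above concrete, a hedged sketch of filling out
msm_camera_cci_ctrl for a single register write on I2C master 0 follows; the
slave address, register values and the hand-off of the request to the CCI v4l2
subdev are assumptions, not something this header defines.

	static struct msm_camera_i2c_reg_conf init_tbl[] = {
		{ .reg_addr = 0x0100, .reg_data = 0x01,
		  .dt = MSM_CAMERA_I2C_BYTE_DATA,
		  .cmd_type = MSM_CAMERA_I2C_CMD_WRITE, },
	};

	struct msm_camera_cci_client cci_client = {
		.cci_i2c_master = MASTER_0,
		.sid     = 0x6C >> 1,	/* example 7-bit slave id */
		.retries = 3,
	};

	struct msm_camera_cci_ctrl ctrl = {
		.cmd      = MSM_CCI_I2C_WRITE,
		.cci_info = &cci_client,
		.cfg.cci_i2c_write_cfg = {
			.reg_conf_tbl = init_tbl,
			.addr_type    = MSM_CAMERA_I2C_WORD_ADDR,
			.data_type    = MSM_CAMERA_I2C_BYTE_DATA,
			.size         = ARRAY_SIZE(init_tbl),
		},
	};
	/* ctrl would then be passed to the CCI subdev by the sensor driver;
	 * the dispatch mechanism is outside the scope of this header. */
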
diff --git a/arch/arm/mach-msm/include/mach/iommu.h b/arch/arm/mach-msm/include/mach/iommu.h
index 28c53db..b14f145 100644
--- a/arch/arm/mach-msm/include/mach/iommu.h
+++ b/arch/arm/mach-msm/include/mach/iommu.h
@@ -72,6 +72,7 @@
  * @irq:	Interrupt number
  * @clk:	The bus clock for this IOMMU hardware instance
  * @pclk:	The clock for the IOMMU bus interconnect
+ * @aclk:	Alternate clock for this IOMMU core, if any
  * @name:	Human-readable name of this IOMMU device
  * @gdsc:	Regulator needed to power this HW block (v2 only)
  * @nsmr:	Size of the SMT on this HW block (v2 only)
@@ -85,6 +86,7 @@
 	int ttbr_split;
 	struct clk *clk;
 	struct clk *pclk;
+	struct clk *aclk;
 	const char *name;
 	struct regulator *gdsc;
 	unsigned int nsmr;
diff --git a/arch/arm/mach-msm/include/mach/mpm.h b/arch/arm/mach-msm/include/mach/mpm.h
index 76f1ea6..fabaa09 100644
--- a/arch/arm/mach-msm/include/mach/mpm.h
+++ b/arch/arm/mach-msm/include/mach/mpm.h
@@ -109,13 +109,14 @@
 /**
  * msm_mpm_enter_sleep() -Called from PM code before entering low power mode
  *
+ * @sclk_count: wakeup time in sclk counts for programmed RPM wakeup
 * @from_idle: indicates if the system is entering low power mode as a part of
  *		suspend/idle task.
  *
  * Low power management code calls into this API to configure the MPM to
  * monitor the active irqs before going to sleep.
  */
-void msm_mpm_enter_sleep(bool from_idle);
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle);
 /**
  * msm_mpm_exit_sleep() -Called from PM code after resuming from low power mode
  *
@@ -158,11 +159,8 @@
 { return false; }
 static inline bool msm_mpm_gpio_irqs_detectable(bool from_idle)
 { return false; }
-static inline void msm_mpm_enter_sleep(bool from_idle) {}
+static inline void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle) {}
 static inline void msm_mpm_exit_sleep(bool from_idle) {}
 static inline void __init of_mpm_init(struct device_node *node) {}
 #endif
-
-
-
 #endif /* __ARCH_ARM_MACH_MSM_MPM_H */
diff --git a/arch/arm/mach-msm/include/mach/msm_bus_board.h b/arch/arm/mach-msm/include/mach/msm_bus_board.h
index d95e4a4..0a53b46 100644
--- a/arch/arm/mach-msm/include/mach/msm_bus_board.h
+++ b/arch/arm/mach-msm/include/mach/msm_bus_board.h
@@ -65,6 +65,7 @@
 extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata;
 extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata;
 extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata;
 extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata;
 extern struct msm_bus_fabric_registration msm_bus_8960_cpss_fpb_pdata;
 
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
index dc633fb..97c03e7 100644
--- a/arch/arm/mach-msm/include/mach/msm_smd.h
+++ b/arch/arm/mach-msm/include/mach/msm_smd.h
@@ -223,6 +223,19 @@
  */
 void smd_disable_read_intr(smd_channel_t *ch);
 
+/**
+ * Enable/disable receive interrupts for the remote processor used by a
+ * particular channel.
+ * @ch:      open channel handle to use for the edge
+ * @mask:    1 = mask interrupts; 0 = unmask interrupts
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels.  As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask);
+
 /* Starts a packet transaction.  The size of the packet may exceed the total
  * size of the smd ring buffer.
  *
@@ -389,6 +402,11 @@
 {
 }
 
+static inline int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
+{
+	return -ENODEV;
+}
+
 static inline int smd_write_start(smd_channel_t *ch, int len)
 {
 	return -ENODEV;
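
A short usage sketch of the new smd_mask_receive_interrupt() API for the
power-collapse sequencing mentioned in its comment; the channel handle and the
sleep entry itself are placeholders, not part of this patch.

	/* ch is an already-open smd_channel_t on the edge of interest */
	if (smd_mask_receive_interrupt(ch, true) == 0) {
		/* all SMD interrupts from that remote subsystem are masked */
		enter_power_collapse();			/* placeholder */
		smd_mask_receive_interrupt(ch, false);	/* unmask on resume */
	}
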
diff --git a/arch/arm/mach-msm/include/mach/socinfo.h b/arch/arm/mach-msm/include/mach/socinfo.h
index 2c3d395..f7ba507 100644
--- a/arch/arm/mach-msm/include/mach/socinfo.h
+++ b/arch/arm/mach-msm/include/mach/socinfo.h
@@ -330,4 +330,16 @@
 #endif
 }
 
+static inline int cpu_is_msm8974(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+	enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+	BUG_ON(cpu == MSM_CPU_UNKNOWN);
+	return cpu == MSM_CPU_8974;
+#else
+	return 0;
+#endif
+}
+
 #endif
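
As an illustration of where the new cpu_is_msm8974() helper might be used (not
taken from this patch), common platform setup code could gate 8974-only
initialization on it, for example selecting the clock table added in
clock-8974.c, assuming the usual msm_clock_init() entry point:

	if (cpu_is_msm8974())
		msm_clock_init(&msm8974_clock_init_data);
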
diff --git a/arch/arm/mach-msm/lpm_levels.c b/arch/arm/mach-msm/lpm_levels.c
index ea368ae..f7456ef 100644
--- a/arch/arm/mach-msm/lpm_levels.c
+++ b/arch/arm/mach-msm/lpm_levels.c
@@ -20,6 +20,7 @@
 #include <mach/mpm.h>
 #include "lpm_resources.h"
 #include "pm.h"
+#include "rpm-notifier.h"
 
 static struct msm_rpmrs_level *msm_lpm_levels;
 static int msm_lpm_level_count;
@@ -40,15 +41,23 @@
 		bool from_idle, bool notify_rpm)
 {
 	int ret = 0;
+	struct msm_rpmrs_limits *l = (struct msm_rpmrs_limits *)limits;
 
-	ret = msm_lpmrs_enter_sleep((struct msm_rpmrs_limits *)limits,
-					from_idle, notify_rpm);
+	ret = msm_rpm_enter_sleep();
+	if (ret) {
+		pr_warn("%s(): RPM failed to enter sleep err:%d\n",
+				__func__, ret);
+		goto bail;
+	}
+	ret = msm_lpmrs_enter_sleep(sclk_count, l, from_idle, notify_rpm);
+bail:
 	return ret;
 }
 
 static void msm_lpm_exit_sleep(void *limits, bool from_idle,
 		bool notify_rpm, bool collapsed)
 {
+	msm_rpm_exit_sleep();
 	msm_lpmrs_exit_sleep((struct msm_rpmrs_limits *)limits,
 				from_idle, notify_rpm, collapsed);
 }
diff --git a/arch/arm/mach-msm/lpm_resources.c b/arch/arm/mach-msm/lpm_resources.c
index e5be352..0758651 100644
--- a/arch/arm/mach-msm/lpm_resources.c
+++ b/arch/arm/mach-msm/lpm_resources.c
@@ -46,7 +46,7 @@
 static bool msm_lpm_get_rpm_notif = true;
 
 /*Macros*/
-#define VDD_DIG_ACTIVE		(950000)
+#define VDD_DIG_ACTIVE		(5)
 #define VDD_MEM_ACTIVE		(1050000)
 #define MAX_RS_NAME		(16)
 #define MAX_RS_SIZE		(4)
@@ -264,7 +264,7 @@
 		return ret;
 	}
 
-	ret = msm_rpm_wait_for_ack(msg_id);
+	ret = msm_rpm_wait_for_ack_noirq(msg_id);
 	if (ret < 0) {
 		pr_err("%s: Couldn't get ACK from RPM for Msg %d Error %d",
 				__func__, msg_id, ret);
@@ -619,12 +619,10 @@
 	msm_lpm_notify_common(rpm_notifier_cb, rs);
 }
 
-/* MPM
-static bool msm_lpm_use_mpm(struct msm_rpmrs_limits *limits)
+static inline bool msm_lpm_use_mpm(struct msm_rpmrs_limits *limits)
 {
-	return ((limits->pxo == MSM_LPM_PXO_OFF) ||
-		(limits->vdd_dig_lower_bound <= VDD_DIG_RET_HIGH));
-}*/
+	return (limits->pxo == MSM_LPM_PXO_OFF);
+}
 
 /* LPM levels interface */
 bool msm_lpm_level_beyond_limit(struct msm_rpmrs_limits *limits)
@@ -647,7 +645,7 @@
 	return beyond_limit;
 }
 
-int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
+int msm_lpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
 				bool from_idle, bool notify_rpm)
 {
 	int ret = 0;
@@ -668,9 +666,8 @@
 	}
 	msm_lpm_get_rpm_notif = true;
 
-	/* MPM Enter sleep
 	if (msm_lpm_use_mpm(limits))
-		msm_mpm_enter_sleep(from_idle);*/
+		msm_mpm_enter_sleep(sclk_count, from_idle);
 
 	return ret;
 }
diff --git a/arch/arm/mach-msm/lpm_resources.h b/arch/arm/mach-msm/lpm_resources.h
index bc06d7b..120832f 100644
--- a/arch/arm/mach-msm/lpm_resources.h
+++ b/arch/arm/mach-msm/lpm_resources.h
@@ -71,6 +71,7 @@
  * on the low power mode being entered. L2 low power mode is also set in
  * this function.
 
+ * @sclk_count: wakeup counter for RPM.
  * @limits: pointer to the resource limits of the low power mode being entered.
  * @from_idle: bool to determine if this call being made as a part of
  *             idle power collapse.
@@ -78,7 +79,7 @@
  *
  * returns 0 on success.
  */
-int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
+int msm_lpmrs_enter_sleep(uint32_t sclk_count, struct msm_rpmrs_limits *limits,
 	bool from_idle, bool notify_rpm);
 
 /**
@@ -106,8 +107,8 @@
 	return true;
 }
 
-static inline int msm_lpmrs_enter_sleep(struct msm_rpmrs_limits *limits,
-	bool from_idle, bool notify_rpm)
+static inline int msm_lpmrs_enter_sleep(uint32_t sclk_count,
+	struct msm_rpmrs_limits *limits, bool from_idle, bool notify_rpm)
 {
 	return 0;
 }
diff --git a/arch/arm/mach-msm/mpm-of.c b/arch/arm/mach-msm/mpm-of.c
index 1832301..e4c0e4e 100644
--- a/arch/arm/mach-msm/mpm-of.c
+++ b/arch/arm/mach-msm/mpm-of.c
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <asm/hardware/gic.h>
+#include <asm/arch_timer.h>
 #include <mach/gpio.h>
 #include <mach/mpm.h>
 
@@ -70,7 +71,8 @@
 #define MSM_MPM_DETECT_CTL_SHIFT(irq) ((irq % 16) * 2)
 
 #define hashfn(val) (val % MSM_MPM_NR_MPM_IRQS)
-
+#define SCLK_HZ (32768)
+#define ARCH_TIMER_HZ (19200000)
 static struct msm_mpm_device_data msm_mpm_dev_data;
 
 enum mpm_reg_offsets {
@@ -152,14 +154,20 @@
 	return IRQ_HANDLED;
 }
 
-static void msm_mpm_set(bool wakeset)
+static void msm_mpm_set(cycle_t wakeup, bool wakeset)
 {
 	uint32_t *irqs;
 	unsigned int reg;
 	int i;
+	uint32_t *expiry_timer;
+
+	expiry_timer = (uint32_t *)&wakeup;
 
 	irqs = wakeset ? msm_mpm_wake_irq : msm_mpm_enabled_irq;
 	for (i = 0; i < MSM_MPM_REG_WIDTH; i++) {
+		reg = MSM_MPM_REG_WAKEUP;
+		msm_mpm_write(reg, i, expiry_timer[i]);
+
 		reg = MSM_MPM_REG_ENABLE;
 		msm_mpm_write(reg, i, irqs[i]);
 
@@ -448,14 +456,23 @@
 	return true;
 }
 
-void msm_mpm_enter_sleep(bool from_idle)
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
 {
+	cycle_t wakeup = (u64)sclk_count * ARCH_TIMER_HZ;
+
 	if (!msm_mpm_is_initialized()) {
 		pr_err("%s(): MPM not initialized\n", __func__);
 		return;
 	}
 
-	msm_mpm_set(!from_idle);
+	if (sclk_count) {
+		do_div(wakeup, SCLK_HZ);
+		wakeup += arch_counter_get_cntpct();
+	} else {
+		wakeup = (~0ULL);
+	}
+
+	msm_mpm_set(wakeup, !from_idle);
 }
 
 void msm_mpm_exit_sleep(bool from_idle)
@@ -554,7 +571,15 @@
 		pr_info("%s(): request_irq failed errno: %d\n", __func__, ret);
 		goto failed_irq_get;
 	}
-	msm_mpm_initialized &= MSM_MPM_DEVICE_PROBED;
+	ret = irq_set_irq_wake(dev->mpm_ipc_irq, 1);
+
+	if (ret) {
+		pr_err("%s: failed to set wakeup irq %u: %d\n",
+			__func__, dev->mpm_ipc_irq, ret);
+		goto failed_irq_get;
+
+	}
+	msm_mpm_initialized |= MSM_MPM_DEVICE_PROBED;
 
 	return 0;
 
@@ -701,9 +726,10 @@
 		}
 
 	}
-	msm_mpm_initialized &= MSM_MPM_IRQ_MAPPING_DONE;
+	msm_mpm_initialized |= MSM_MPM_IRQ_MAPPING_DONE;
 
 	return;
+
 failed_malloc:
 	for (i = 0; i < MSM_MPM_NR_MPM_IRQS; i++) {
 		mpm_of_map[i].chip->irq_mask = NULL;
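
The wakeup programming added above converts an RPM wakeup expressed in
32.768 kHz sclk ticks into an absolute architected-timer deadline. A worked
example with illustrative numbers (the constants are the SCLK_HZ and
ARCH_TIMER_HZ defines above):

	/* A wakeup 2 seconds away is sclk_count = 2 * 32768 = 65536 ticks. */
	cycle_t wakeup = (u64)65536 * ARCH_TIMER_HZ;	/* 1,258,291,200,000 */

	do_div(wakeup, SCLK_HZ);	/* 38,400,000 cycles, i.e. 2 s at 19.2 MHz */
	wakeup += arch_counter_get_cntpct();	/* absolute CNTPCT deadline that
						 * ends up in MSM_MPM_REG_WAKEUP */
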
diff --git a/arch/arm/mach-msm/mpm.c b/arch/arm/mach-msm/mpm.c
index b395b61..1c39415 100644
--- a/arch/arm/mach-msm/mpm.c
+++ b/arch/arm/mach-msm/mpm.c
@@ -421,7 +421,7 @@
 			MSM_MPM_NR_APPS_IRQS);
 }
 
-void msm_mpm_enter_sleep(bool from_idle)
+void msm_mpm_enter_sleep(uint32_t sclk_count, bool from_idle)
 {
 	msm_mpm_set(!from_idle);
 }
diff --git a/arch/arm/mach-msm/msm-buspm-dev.c b/arch/arm/mach-msm/msm-buspm-dev.c
index 296418d..a818eed 100644
--- a/arch/arm/mach-msm/msm-buspm-dev.c
+++ b/arch/arm/mach-msm/msm-buspm-dev.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -40,6 +40,13 @@
 	return (dev) ? dev->vaddr : NULL;
 }
 
+static inline unsigned int msm_buspm_dev_get_buflen(struct file *filp)
+{
+	struct msm_buspm_map_dev *dev = filp->private_data;
+
+	return dev ? dev->buflen : 0;
+}
+
 static inline unsigned long msm_buspm_dev_get_paddr(struct file *filp)
 {
 	struct msm_buspm_map_dev *dev = filp->private_data;
@@ -114,6 +121,7 @@
 	unsigned long paddr;
 	int retval = 0;
 	void *buf = msm_buspm_dev_get_vaddr(filp);
+	unsigned int buflen = msm_buspm_dev_get_buflen(filp);
 	unsigned char *dbgbuf = buf;
 
 	switch (cmd) {
@@ -156,7 +164,7 @@
 			break;
 		}
 
-		if ((xfer.size <= sizeof(buf)) &&
+		if ((xfer.size <= buflen) &&
 			(copy_to_user((void __user *)xfer.data, buf,
 					xfer.size))) {
 			retval = -EFAULT;
@@ -177,7 +185,7 @@
 			break;
 		}
 
-		if ((sizeof(buf) <= xfer.size) &&
+		if ((buflen <= xfer.size) &&
 			(copy_from_user(buf, (void __user *)xfer.data,
 			xfer.size))) {
 			retval = -EFAULT;
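
For context on the bounds-check change above: buf is a void *, so sizeof(buf)
evaluates to the size of the pointer (4 or 8 bytes), not the length of the
mapped debug buffer, which is why the checks now use the stored buffer length.
A minimal illustration:

	void *buf     = dev->vaddr;	/* start of a dev->buflen byte mapping */
	size_t wrong  = sizeof(buf);	/* 4 or 8: the size of the pointer     */
	size_t actual = dev->buflen;	/* real length, what
					 * msm_buspm_dev_get_buflen() returns  */
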
diff --git a/arch/arm/mach-msm/msm-krait-l2-accessors.c b/arch/arm/mach-msm/msm-krait-l2-accessors.c
index 41a2490..3da155a 100644
--- a/arch/arm/mach-msm/msm-krait-l2-accessors.c
+++ b/arch/arm/mach-msm/msm-krait-l2-accessors.c
@@ -82,10 +82,6 @@
 	u32 uninitialized_var(l2cpuvrf8_val), l2cpuvrf8_addr = 0;
 	u32 ret_val;
 
-	/* CP15 registers are not emulated on RUMI3. */
-	if (machine_is_msm8960_rumi3())
-		return 0;
-
 	raw_spin_lock_irqsave(&l2_access_lock, flags);
 
 	if (l2cpuvrf8_needs_fix(reg_addr))
@@ -115,10 +111,6 @@
 	unsigned long flags;
 	u32 uninitialized_var(l2cpuvrf8_val), l2cpuvrf8_addr = 0;
 
-	/* CP15 registers are not emulated on RUMI3. */
-	if (machine_is_msm8960_rumi3())
-		return;
-
 	raw_spin_lock_irqsave(&l2_access_lock, flags);
 
 	if (l2cpuvrf8_needs_fix(reg_addr))
@@ -144,9 +136,6 @@
 {
 	u32 val;
 	unsigned long flags;
-	/* CP15 registers are not emulated on RUMI3. */
-	if (machine_is_msm8960_rumi3())
-		return 0;
 
 	raw_spin_lock_irqsave(&l2_access_lock, flags);
 	asm volatile ("mcr     p15, 3, %[l2cpselr], c15, c0, 6\n\t"
diff --git a/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c b/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
index 7ede23d..9ba9f7b1 100644
--- a/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
+++ b/arch/arm/mach-msm/msm_bus/msm_bus_board_8960.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,12 @@
 	MSM_BUS_TIERED_SLAVE_KMPSS_L2,
 };
 
+enum msm_bus_sg_tiered_slaves_type {
+	SG_TIERED_SLAVE_MM_IMEM = 1,
+	SG_MMSS_TIERED_SLAVE_FAB_APPS_0,
+	SG_MMSS_TIERED_SLAVE_FAB_APPS_1,
+};
+
 enum msm_bus_8960_master_ports_type {
 	MSM_BUS_SYSTEM_MASTER_PORT_APPSS_FAB = 0,
 	MSM_BUS_MASTER_PORT_SPS,
@@ -106,6 +112,23 @@
 	MSM_BUS_SLAVE_PORT_RIVA,
 };
 
+enum msm_bus_8960_sg_master_ports_type {
+	MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0 =
+		MSM_BUS_MMSS_MASTER_PORT_UNUSED_2,
+	MSM_BUS_MASTER_PORT_VIDEO_CAP =
+		MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE0,
+	MSM_BUS_MASTER_PORT_VIDEO_DEC =
+		MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE1,
+	MSM_BUS_MASTER_PORT_VIDEO_ENC =
+		MSM_BUS_MASTER_PORT_HD_CODEC_PORT0,
+};
+
+enum msm_bus_8960_sg_slave_ports_type {
+	SG_SLAVE_PORT_MM_IMEM = 0,
+	SG_MMSS_SLAVE_PORT_APPS_FAB_0,
+	SG_MMSS_SLAVE_PORT_APPS_FAB_1,
+};
+
 static int tier2[] = {MSM_BUS_BW_TIER2,};
 static uint32_t master_iids[NMASTERS];
 static uint32_t slave_iids[NSLAVES];
@@ -424,6 +447,10 @@
 static int mport_mdp1[] = {MSM_BUS_MASTER_PORT_MDP_PORT1,};
 static int mport_rotator[] = {MSM_BUS_MASTER_PORT_ROTATOR,};
 static int mport_graphics_3d[] = {MSM_BUS_MASTER_PORT_GRAPHICS_3D,};
+static int pro_mport_graphics_3d[] = {
+	MSM_BUS_MASTER_PORT_GRAPHICS_3D_PORT0,
+	MSM_BUS_MASTER_PORT_GRAPHICS_3D,
+};
 static int mport_jpeg_dec[] = {MSM_BUS_MASTER_PORT_JPEG_DEC,};
 static int mport_graphics_2d_core0[] = {MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE0,};
 static int mport_vfe[] = {MSM_BUS_MASTER_PORT_VFE,};
@@ -432,6 +459,9 @@
 static int mport_graphics_2d_core1[] = {MSM_BUS_MASTER_PORT_GRAPHICS_2D_CORE1,};
 static int mport_hd_codec_port0[] = {MSM_BUS_MASTER_PORT_HD_CODEC_PORT0,};
 static int mport_hd_codec_port1[] = {MSM_BUS_MASTER_PORT_HD_CODEC_PORT1,};
+static int mport_video_cap[] = {MSM_BUS_MASTER_PORT_VIDEO_CAP};
+static int mport_video_enc[] = {MSM_BUS_MASTER_PORT_VIDEO_ENC};
+static int mport_video_dec[] = {MSM_BUS_MASTER_PORT_VIDEO_DEC};
 static int appss_mport_fab_mmss[] = {
 	MSM_BUS_APPSS_MASTER_PORT_FAB_MMSS_0,
 	MSM_BUS_APPSS_MASTER_PORT_FAB_MMSS_1
@@ -439,15 +469,25 @@
 
 static int mmss_sport_apps_fab[] = {
 	MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_0,
-	MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_1
+	MSM_BUS_MMSS_SLAVE_PORT_APPS_FAB_1,
+};
+static int sg_sport_apps_fab[] = {
+	SG_MMSS_SLAVE_PORT_APPS_FAB_0,
+	SG_MMSS_SLAVE_PORT_APPS_FAB_1,
 };
 static int sport_mm_imem[] = {MSM_BUS_SLAVE_PORT_MM_IMEM,};
+static int sg_sport_mm_imem[] = {SG_SLAVE_PORT_MM_IMEM,};
 
 static int mmss_tiered_slave_fab_apps[] = {
 	MSM_BUS_MMSS_TIERED_SLAVE_FAB_APPS_0,
 	MSM_BUS_MMSS_TIERED_SLAVE_FAB_APPS_1,
 };
+static int sg_tiered_slave_fab_apps[] = {
+	SG_MMSS_TIERED_SLAVE_FAB_APPS_0,
+	SG_MMSS_TIERED_SLAVE_FAB_APPS_1,
+};
 static int tiered_slave_mm_imem[] = {MSM_BUS_TIERED_SLAVE_MM_IMEM,};
+static int sg_tiered_slave_mm_imem[] = {SG_TIERED_SLAVE_MM_IMEM,};
 
 
 static struct msm_bus_node_info mmss_fabric_info[]  = {
@@ -557,6 +597,106 @@
 	},
 };
 
+static struct msm_bus_node_info sg_mmss_fabric_info[]  = {
+	{
+		.id = MSM_BUS_MASTER_MDP_PORT0,
+		.masterp = mport_mdp,
+		.num_mports = ARRAY_SIZE(mport_mdp),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+	},
+	{
+		.id = MSM_BUS_MASTER_MDP_PORT1,
+		.masterp = mport_mdp1,
+		.num_mports = ARRAY_SIZE(mport_mdp1),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+	},
+	{
+		.id = MSM_BUS_MASTER_ROTATOR,
+		.masterp = mport_rotator,
+		.num_mports = ARRAY_SIZE(mport_rotator),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+	},
+	{
+		.id = MSM_BUS_MASTER_GRAPHICS_3D,
+		.masterp = pro_mport_graphics_3d,
+		.num_mports = ARRAY_SIZE(pro_mport_graphics_3d),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+	},
+	{
+		.id = MSM_BUS_MASTER_JPEG_DEC,
+		.masterp = mport_jpeg_dec,
+		.num_mports = ARRAY_SIZE(mport_jpeg_dec),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+	},
+	{
+		.id = MSM_BUS_MASTER_VIDEO_CAP,
+		.masterp = mport_video_cap,
+		.num_mports = ARRAY_SIZE(mport_video_cap),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+	},
+	{
+		.id = MSM_BUS_MASTER_VFE,
+		.masterp = mport_vfe,
+		.num_mports = ARRAY_SIZE(mport_vfe),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+	},
+	{
+		.id = MSM_BUS_MASTER_VPE,
+		.masterp = mport_vpe,
+		.num_mports = ARRAY_SIZE(mport_vpe),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+	},
+	{
+		.id = MSM_BUS_MASTER_JPEG_ENC,
+		.masterp = mport_jpeg_enc,
+		.num_mports = ARRAY_SIZE(mport_jpeg_enc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+	},
+	/* This port has been added for V2. It is absent in V1 */
+	{
+		.id = MSM_BUS_MASTER_VIDEO_DEC,
+		.masterp = mport_video_dec,
+		.num_mports = ARRAY_SIZE(mport_video_dec),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+	},
+	{
+		.id = MSM_BUS_MASTER_VIDEO_ENC,
+		.masterp = mport_video_enc,
+		.num_mports = ARRAY_SIZE(mport_video_enc),
+		.tier = tier2,
+		.num_tiers = ARRAY_SIZE(tier2),
+	},
+	{
+		.id = MSM_BUS_FAB_APPSS,
+		.gateway = 1,
+		.slavep = sg_sport_apps_fab,
+		.num_sports = ARRAY_SIZE(sg_sport_apps_fab),
+		.masterp = appss_mport_fab_mmss,
+		.num_mports = ARRAY_SIZE(appss_mport_fab_mmss),
+		.tier = sg_tiered_slave_fab_apps,
+		.num_tiers = ARRAY_SIZE(sg_tiered_slave_fab_apps),
+		.buswidth = 16,
+	},
+	{
+		.id = MSM_BUS_SLAVE_MM_IMEM,
+		.slavep = sg_sport_mm_imem,
+		.num_sports = ARRAY_SIZE(sg_sport_mm_imem),
+		.tier = sg_tiered_slave_mm_imem,
+		.num_tiers = ARRAY_SIZE(sg_tiered_slave_mm_imem),
+		.buswidth = 8,
+	},
+};
+
 static struct msm_bus_node_info sys_fpb_fabric_info[]  = {
 	{
 		.id = MSM_BUS_FAB_SYSTEM,
@@ -919,6 +1059,22 @@
 	.board_algo = &msm_bus_board_algo,
 };
 
+struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata = {
+	.id = MSM_BUS_FAB_MMSS,
+	.name = "msm_mm_fab",
+	sg_mmss_fabric_info,
+	ARRAY_SIZE(sg_mmss_fabric_info),
+	.ahb = 0,
+	.fabclk[DUAL_CTX] = "bus_clk",
+	.fabclk[ACTIVE_CTX] = "bus_a_clk",
+	.haltid = MSM_RPM_ID_MMSS_FABRIC_CFG_HALT_0,
+	.offset = MSM_RPM_ID_MM_FABRIC_ARB_0,
+	.nmasters = 13,
+	.nslaves = 3,
+	.ntieredslaves = 3,
+	.board_algo = &msm_bus_board_algo,
+};
+
 struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata = {
 	.id = MSM_BUS_FAB_SYSTEM_FPB,
 	.name = "msm_sys_fpb",
diff --git a/arch/arm/mach-msm/msm_rtb.c b/arch/arm/mach-msm/msm_rtb.c
index 9dbf9c1..a60c213 100644
--- a/arch/arm/mach-msm/msm_rtb.c
+++ b/arch/arm/mach-msm/msm_rtb.c
@@ -16,11 +16,13 @@
 #include <linux/kernel.h>
 #include <linux/memory_alloc.h>
 #include <linux/module.h>
+#include <linux/mod_devicetable.h>
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/atomic.h>
+#include <linux/of.h>
 #include <asm/io.h>
 #include <asm-generic/sizes.h>
 #include <mach/memory.h>
@@ -31,6 +33,8 @@
 #define SENTINEL_BYTE_2 0xAA
 #define SENTINEL_BYTE_3 0xFF
 
+#define RTB_COMPAT_STR	"qcom,msm-rtb"
+
 /* Write
  * 1) 3 bytes sentinel
  * 2) 1 bytes of log type
@@ -227,8 +231,22 @@
 #if defined(CONFIG_MSM_RTB_SEPARATE_CPUS)
 	unsigned int cpu;
 #endif
+	int ret;
 
-	msm_rtb.size = d->size;
+	if (!pdev->dev.of_node) {
+		msm_rtb.size = d->size;
+	} else {
+		int size;
+
+		ret = of_property_read_u32((&pdev->dev)->of_node,
+					"qcom,memory-reservation-size",
+					&size);
+
+		if (ret < 0)
+			return ret;
+
+		msm_rtb.size = size;
+	}
 
 	if (msm_rtb.size <= 0 || msm_rtb.size > SZ_1M)
 		return -EINVAL;
@@ -275,10 +293,17 @@
 	return 0;
 }
 
+static struct of_device_id msm_match_table[] = {
+	{.compatible = RTB_COMPAT_STR},
+	{},
+};
+EXPORT_COMPAT(RTB_COMPAT_STR);
+
 static struct platform_driver msm_rtb_driver = {
 	.driver         = {
 		.name = "msm_rtb",
-		.owner = THIS_MODULE
+		.owner = THIS_MODULE,
+		.of_match_table = msm_match_table
 	},
 };
 
diff --git a/arch/arm/mach-msm/pcie.c b/arch/arm/mach-msm/pcie.c
index f105356..d954b53 100644
--- a/arch/arm/mach-msm/pcie.c
+++ b/arch/arm/mach-msm/pcie.c
@@ -200,6 +200,16 @@
 static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
 			    int where, int size, u32 val)
 {
+	/*
+	 * An attempt to reset the secondary bus causes the PCIE core to reset.
+	 * Disable the secondary bus reset functionality.
+	 */
+	if ((bus->number == 0) && (where == PCI_BRIDGE_CONTROL) &&
+	    (val & PCI_BRIDGE_CTL_BUS_RESET)) {
+		pr_info("PCIE secondary bus reset not supported\n");
+		val &= ~PCI_BRIDGE_CTL_BUS_RESET;
+	}
+
 	return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
 }
 
diff --git a/arch/arm/mach-msm/perf_event_msm_krait_l2.c b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
index 3635572..103eef0 100644
--- a/arch/arm/mach-msm/perf_event_msm_krait_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_krait_l2.c
@@ -532,7 +532,7 @@
 {
 	krait_l2_pmu.plat_device = pdev;
 
-	if (!armpmu_register(&krait_l2_pmu, "kraitl2", -1))
+	if (!armpmu_register(&krait_l2_pmu, "msm-l2", -1))
 		pmu_type = krait_l2_pmu.pmu.type;
 
 	return 0;
diff --git a/arch/arm/mach-msm/perf_event_msm_l2.c b/arch/arm/mach-msm/perf_event_msm_l2.c
index aae2552..2ad36df 100644
--- a/arch/arm/mach-msm/perf_event_msm_l2.c
+++ b/arch/arm/mach-msm/perf_event_msm_l2.c
@@ -877,7 +877,7 @@
 {
 	scorpion_l2_pmu.plat_device = pdev;
 
-	if (!armpmu_register(&scorpion_l2_pmu, "scorpionl2", -1))
+	if (!armpmu_register(&scorpion_l2_pmu, "msm-l2", -1))
 		pmu_type = scorpion_l2_pmu.pmu.type;
 
 	return 0;
diff --git a/arch/arm/mach-msm/pil-q6v4.c b/arch/arm/mach-msm/pil-q6v4.c
index 131a74b..32cce1d 100644
--- a/arch/arm/mach-msm/pil-q6v4.c
+++ b/arch/arm/mach-msm/pil-q6v4.c
@@ -116,7 +116,7 @@
 	int err;
 	struct q6v4_data *drv = dev_get_drvdata(dev);
 
-	err = regulator_set_voltage(drv->vreg, 375000, 375000);
+	err = regulator_set_voltage(drv->vreg, 743750, 743750);
 	if (err) {
 		dev_err(dev, "Failed to set regulator's voltage step.\n");
 		return err;
diff --git a/arch/arm/mach-msm/pil-q6v5-mss.c b/arch/arm/mach-msm/pil-q6v5-mss.c
index e279f99..62685ca 100644
--- a/arch/arm/mach-msm/pil-q6v5-mss.c
+++ b/arch/arm/mach-msm/pil-q6v5-mss.c
@@ -155,11 +155,14 @@
 		goto err_clks;
 
 	/* Program Image Address */
-	if (drv->self_auth)
+	if (drv->self_auth) {
 		writel_relaxed(drv->start_addr, drv->rmb_base + RMB_MBA_IMAGE);
-	else
+		/* Ensure write to RMB base occurs before reset is released. */
+		mb();
+	} else {
 		writel_relaxed((drv->start_addr >> 4) & 0x0FFFFFF0,
 				drv->reg_base + QDSP6SS_RST_EVB);
+	}
 
 	ret = pil_q6v5_reset(pil);
 	if (ret)
diff --git a/arch/arm/mach-msm/pil-riva.c b/arch/arm/mach-msm/pil-riva.c
index 8a16b43f..2d1fa80 100644
--- a/arch/arm/mach-msm/pil-riva.c
+++ b/arch/arm/mach-msm/pil-riva.c
@@ -263,17 +263,17 @@
 static int pil_riva_init_image_trusted(struct pil_desc *pil,
 		const u8 *metadata, size_t size)
 {
-	return pas_init_image(PAS_RIVA, metadata, size);
+	return pas_init_image(PAS_WCNSS, metadata, size);
 }
 
 static int pil_riva_reset_trusted(struct pil_desc *pil)
 {
-	return pas_auth_and_reset(PAS_RIVA);
+	return pas_auth_and_reset(PAS_WCNSS);
 }
 
 static int pil_riva_shutdown_trusted(struct pil_desc *pil)
 {
-	return pas_shutdown(PAS_RIVA);
+	return pas_shutdown(PAS_WCNSS);
 }
 
 static struct pil_reset_ops pil_riva_ops_trusted = {
@@ -334,7 +334,7 @@
 	desc->owner = THIS_MODULE;
 	desc->proxy_timeout = 10000;
 
-	if (pas_supported(PAS_RIVA) > 0) {
+	if (pas_supported(PAS_WCNSS) > 0) {
 		desc->ops = &pil_riva_ops_trusted;
 		dev_info(&pdev->dev, "using secure boot\n");
 	} else {
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 5e063a1..a11ca95 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -83,14 +83,6 @@
 	if (!base_ptr)
 		return -ENODEV;
 
-	if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3()) {
-		writel_relaxed(0x10, base_ptr+0x04);
-		writel_relaxed(0x80, base_ptr+0x04);
-	}
-
-	if (machine_is_apq8064_sim())
-		writel_relaxed(0xf0000, base_ptr+0x04);
-
 	if (machine_is_msm8974_sim()) {
 		writel_relaxed(0x800, base_ptr+0x04);
 		writel_relaxed(0x3FFF, base_ptr+0x14);
@@ -135,10 +127,6 @@
 	if (cpu_is_msm8x60())
 		return scorpion_release_secondary();
 
-	if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3() ||
-	    machine_is_apq8064_sim())
-		return krait_release_secondary_sim(0x02088000, cpu);
-
 	if (machine_is_msm8974_sim())
 		return krait_release_secondary_sim(0xf9088000, cpu);
 
diff --git a/arch/arm/mach-msm/pm2.c b/arch/arm/mach-msm/pm2.c
index 7a8e4c3..8fccda4 100644
--- a/arch/arm/mach-msm/pm2.c
+++ b/arch/arm/mach-msm/pm2.c
@@ -21,10 +21,8 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/init.h>
-#include <linux/pm.h>
 #include <linux/pm_qos.h>
 #include <linux/suspend.h>
-#include <linux/reboot.h>
 #include <linux/io.h>
 #include <linux/tick.h>
 #include <linux/memory.h>
@@ -34,7 +32,6 @@
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #endif
-#include <asm/system_misc.h>
 #ifdef CONFIG_CACHE_L2X0
 #include <asm/hardware/cache-l2x0.h>
 #endif
@@ -1577,55 +1574,6 @@
 	}
 }
 
-/******************************************************************************
- * Restart Definitions
- *****************************************************************************/
-
-static uint32_t restart_reason = 0x776655AA;
-
-static void msm_pm_power_off(void)
-{
-	msm_rpcrouter_close();
-	msm_proc_comm(PCOM_POWER_DOWN, 0, 0);
-	for (;;)
-		;
-}
-
-static void msm_pm_restart(char str, const char *cmd)
-{
-	msm_rpcrouter_close();
-	msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0);
-
-	for (;;)
-		;
-}
-
-static int msm_reboot_call
-	(struct notifier_block *this, unsigned long code, void *_cmd)
-{
-	if ((code == SYS_RESTART) && _cmd) {
-		char *cmd = _cmd;
-		if (!strcmp(cmd, "bootloader")) {
-			restart_reason = 0x77665500;
-		} else if (!strcmp(cmd, "recovery")) {
-			restart_reason = 0x77665502;
-		} else if (!strcmp(cmd, "eraseflash")) {
-			restart_reason = 0x776655EF;
-		} else if (!strncmp(cmd, "oem-", 4)) {
-			unsigned code = simple_strtoul(cmd + 4, 0, 16) & 0xff;
-			restart_reason = 0x6f656d00 | code;
-		} else {
-			restart_reason = 0x77665501;
-		}
-	}
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block msm_reboot_notifier = {
-	.notifier_call = msm_reboot_call,
-};
-
-
 /*
  * Initialize the power management subsystem.
  *
@@ -1693,10 +1641,6 @@
 		     virt_to_phys(&msm_pm_pc_pgd));
 #endif
 
-	pm_power_off = msm_pm_power_off;
-	arm_pm_restart = msm_pm_restart;
-	register_reboot_notifier(&msm_reboot_notifier);
-
 	msm_pm_smem_data = smem_alloc(SMEM_APPS_DEM_SLAVE_DATA,
 		sizeof(*msm_pm_smem_data));
 	if (msm_pm_smem_data == NULL) {
diff --git a/arch/arm/mach-msm/qdsp5/adsp.c b/arch/arm/mach-msm/qdsp5/adsp.c
index eee7e37..11f6b28 100644
--- a/arch/arm/mach-msm/qdsp5/adsp.c
+++ b/arch/arm/mach-msm/qdsp5/adsp.c
@@ -1072,12 +1072,28 @@
 int msm_adsp_enable(struct msm_adsp_module *module)
 {
 	int rc = 0;
+	struct msm_adsp_module *module_en = NULL;
 
 	if (!module)
 		return -EINVAL;
 
 	MM_INFO("enable '%s'state[%d] id[%d]\n",
 				module->name, module->state, module->id);
+	if (!strncmp(module->name, "JPEGTASK", sizeof(module->name)))
+		module_en = find_adsp_module_by_name(&adsp_info, "VIDEOTASK");
+	else if (!strncmp(module->name, "VIDEOTASK", sizeof(module->name)))
+		module_en = find_adsp_module_by_name(&adsp_info, "JPEGTASK");
+	if (module_en) {
+		mutex_lock(&module_en->lock);
+		if (module_en->state == ADSP_STATE_ENABLED ||
+			module_en->state == ADSP_STATE_ENABLING) {
+			MM_ERR("jpeg and video modules cannot be"
+				" enabled at the same time\n");
+			mutex_unlock(&module_en->lock);
+			return -EINVAL;
+		}
+		mutex_unlock(&module_en->lock);
+	}
 
 	mutex_lock(&module->lock);
 	switch (module->state) {
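A minimal usage sketch of the mutual-exclusion rule added above, assuming the usual qdsp5 client flow (the msm_adsp_get() signature and header are assumed, not taken from this patch): a caller enabling JPEGTASK should now be prepared for -EINVAL while VIDEOTASK is enabled or enabling, and vice versa.

/* assumed header: <mach/msm_adsp.h>; signature assumed illustrative */
static int example_enable_jpeg(struct msm_adsp_ops *ops, void *cookie)
{
	struct msm_adsp_module *jpeg;
	int rc;

	rc = msm_adsp_get("JPEGTASK", &jpeg, ops, cookie);
	if (rc)
		return rc;

	rc = msm_adsp_enable(jpeg);
	if (rc == -EINVAL)
		pr_err("JPEGTASK refused: VIDEOTASK already active\n");
	/* a real caller would msm_adsp_put(jpeg) on failure */

	return rc;
}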
diff --git a/arch/arm/mach-msm/qdsp5/audio_out.c b/arch/arm/mach-msm/qdsp5/audio_out.c
index 8eaf829..0c8034c 100644
--- a/arch/arm/mach-msm/qdsp5/audio_out.c
+++ b/arch/arm/mach-msm/qdsp5/audio_out.c
@@ -111,7 +111,7 @@
 
 
 
-#define BUFSZ (960 * 5)
+#define BUFSZ (5248)
 #define DMASZ (BUFSZ * 2)
 
 #define COMMON_OBJ_ID 6
@@ -817,7 +817,7 @@
 		goto done;
 
 	audio->out_buffer_size = BUFSZ;
-	audio->out_sample_rate = 44100;
+	audio->out_sample_rate = 48000;
 	audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V;
 	audio->out_weight = 100;
 
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
index a8773ea..a973b92 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/usf.c
@@ -1131,13 +1131,12 @@
 {
 	uint16_t ind = 0;
 
+	usf_unregister_conflicting_events(
+					usf->conflicting_event_types);
+	usf->conflicting_event_types = 0;
 	for (ind = 0; ind < USF_MAX_EVENT_IND; ++ind) {
 		if (usf->input_ifs[ind] == NULL)
 			continue;
-
-		usf_unregister_conflicting_events(
-						usf->conflicting_event_types);
-		usf->conflicting_event_types = 0;
 		input_unregister_device(usf->input_ifs[ind]);
 		usf->input_ifs[ind] = NULL;
 		pr_debug("%s input_unregister_device[%s]\n",
diff --git a/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c b/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
index b99a9b0..f566e82 100644
--- a/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
+++ b/arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
@@ -39,9 +39,22 @@
 		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
 			INPUT_DEVICE_ID_MATCH_KEYBIT |
 			INPUT_DEVICE_ID_MATCH_ABSBIT,
-		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) },
+		.evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) },
 		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
-		.absbit = { BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+		/* assumption: ABS_X & ABS_Y are in the same long */
+		.absbit = { [BIT_WORD(ABS_X)] = BIT_MASK(ABS_X) |
+						BIT_MASK(ABS_Y) },
+	},
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+			INPUT_DEVICE_ID_MATCH_KEYBIT |
+			INPUT_DEVICE_ID_MATCH_ABSBIT,
+		.evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) },
+		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+		/* assumption: MT_.._X & MT_.._Y are in the same long */
+		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+			BIT_MASK(ABS_MT_POSITION_X) |
+			BIT_MASK(ABS_MT_POSITION_Y) },
 	},
 	{ } /* Terminating entry */
 };
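The designated-index initializers above matter because BIT_MASK() only yields a bit position within one long; the word that bit lives in must be selected with BIT_WORD(). A short sketch (assuming 32-bit longs and the standard input event codes) of why the multitouch axes need this while ABS_X/ABS_Y do not:

#include <linux/input.h>

/* ABS_X = 0x00 and ABS_Y = 0x01 both land in absbit[0] on any word size.
 * ABS_MT_POSITION_X = 0x35, so on a 32-bit kernel BIT_WORD(0x35) == 1 and
 * the mask must be written to absbit[1]; the old initializer without a
 * designated index always wrote word 0.
 */
static const struct input_device_id example_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_ABSBIT,
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
				BIT_MASK(ABS_MT_POSITION_X) |
				BIT_MASK(ABS_MT_POSITION_Y) },
	},
	{ }	/* terminating entry */
};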
@@ -76,12 +89,12 @@
 	int ind = handler->minor;
 
 	pr_debug("%s: name=[%s]; ind=%d\n", __func__, dev->name, ind);
+
 	if (s_usfcdev_events[ind].registered_event &&
 			s_usfcdev_events[ind].match_cb) {
 		rc = (*s_usfcdev_events[ind].match_cb)((uint16_t)ind, dev);
 		pr_debug("%s: [%s]; rc=%d\n", __func__, dev->name, rc);
 	}
-
 	return rc;
 }
 
@@ -128,10 +141,12 @@
 {
 	uint16_t ind = (uint16_t)handle->handler->minor;
 
-	pr_debug("%s: event_type=%d; filter=%d\n",
+	pr_debug("%s: event_type=%d; filter=%d; abs_xy=%ld; abs_y_mt[]=%ld\n",
 		__func__,
 		ind,
-		s_usfcdev_events[ind].filter);
+		s_usfcdev_events[ind].filter,
+		 usfc_tsc_ids[0].absbit[0],
+		 usfc_tsc_ids[1].absbit[1]);
 
 	return s_usfcdev_events[ind].filter;
 }
diff --git a/arch/arm/mach-msm/restart_7k.c b/arch/arm/mach-msm/restart_7k.c
new file mode 100644
index 0000000..dc9edf4
--- /dev/null
+++ b/arch/arm/mach-msm/restart_7k.c
@@ -0,0 +1,101 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+#include <asm/system_misc.h>
+#include <mach/proc_comm.h>
+
+#include "devices-msm7x2xa.h"
+#include "smd_rpcrouter.h"
+
+static uint32_t restart_reason = 0x776655AA;
+
+static void msm_pm_power_off(void)
+{
+	msm_rpcrouter_close();
+	msm_proc_comm(PCOM_POWER_DOWN, 0, 0);
+	for (;;)
+		;
+}
+
+static void msm_pm_restart(char str, const char *cmd)
+{
+	msm_rpcrouter_close();
+	pr_debug("The reset reason is %x\n", restart_reason);
+
+	/* Disable interrupts */
+	local_irq_disable();
+	local_fiq_disable();
+
+	/*
+	 * Set up a flat 1:1 memory mapping in place of the user-mode
+	 * pages to ensure predictable results.  This function also
+	 * takes care of flushing the caches and the TLB.
+	 */
+	setup_mm_for_reboot();
+
+	msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0);
+
+	for (;;)
+		;
+}
+
+static int msm_reboot_call
+	(struct notifier_block *this, unsigned long code, void *_cmd)
+{
+	if ((code == SYS_RESTART) && _cmd) {
+		char *cmd = _cmd;
+		if (!strncmp(cmd, "bootloader", 10)) {
+			restart_reason = 0x77665500;
+		} else if (!strncmp(cmd, "recovery", 8)) {
+			restart_reason = 0x77665502;
+		} else if (!strncmp(cmd, "eraseflash", 10)) {
+			restart_reason = 0x776655EF;
+		} else if (!strncmp(cmd, "oem-", 4)) {
+			unsigned long code;
+			int res;
+			res = kstrtoul(cmd + 4, 16, &code);
+			code &= 0xff;
+			restart_reason = 0x6f656d00 | code;
+		} else {
+			restart_reason = 0x77665501;
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block msm_reboot_notifier = {
+	.notifier_call = msm_reboot_call,
+};
+
+static int __init msm_pm_restart_init(void)
+{
+	int ret;
+
+	pm_power_off = msm_pm_power_off;
+	arm_pm_restart = msm_pm_restart;
+
+	ret = register_reboot_notifier(&msm_reboot_notifier);
+	if (ret)
+		pr_err("Failed to register reboot notifier\n");
+
+	return ret;
+}
+late_initcall(msm_pm_restart_init);
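For context, the command string parsed by msm_reboot_call() originates from the RESTART2 variant of the reboot syscall. A hedged user-space illustration (not part of this patch) of triggering the "recovery" reason:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int main(void)
{
	/* maps to restart_reason 0x77665502 in msm_reboot_call() above */
	return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		       LINUX_REBOOT_CMD_RESTART2, "recovery");
}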
diff --git a/arch/arm/mach-msm/rpm-notifier.h b/arch/arm/mach-msm/rpm-notifier.h
index df8d9b3..33086c6 100644
--- a/arch/arm/mach-msm/rpm-notifier.h
+++ b/arch/arm/mach-msm/rpm-notifier.h
@@ -20,8 +20,31 @@
 	uint32_t size;
 	uint8_t *value;
 };
-
+/**
+ * msm_rpm_register_notifier - Register for sleep set notifications
+ *
+ * @nb - notifier block to register
+ *
+ * return 0 on success, errno on failure.
+ */
 int msm_rpm_register_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_unregister_notifier - Unregister a previously registered notifier
+ *
+ * @nb - notifier block to unregister
+ *
+ * return 0 on success, errno on failure.
+ */
 int msm_rpm_unregister_notifier(struct notifier_block *nb);
 
+/**
+ * msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
+ */
+int msm_rpm_enter_sleep(void);
+
+/**
+ * msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
+ */
+void msm_rpm_exit_sleep(void);
 #endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
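A hedged sketch of a client of the notifier API documented above; the callback payload is assumed to be the sleep-set data described by the struct at the top of this header, and the standard notifier_block plumbing applies.

#include <linux/init.h>
#include <linux/notifier.h>

static int example_rpm_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	/* data is expected to point at the RPM sleep-set update */
	return NOTIFY_OK;
}

static struct notifier_block example_rpm_nb = {
	.notifier_call = example_rpm_notify,
};

static int __init example_init(void)
{
	return msm_rpm_register_notifier(&example_rpm_nb);
}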
diff --git a/arch/arm/mach-msm/rpm-smd.c b/arch/arm/mach-msm/rpm-smd.c
index 697d504..0faafc8c 100644
--- a/arch/arm/mach-msm/rpm-smd.c
+++ b/arch/arm/mach-msm/rpm-smd.c
@@ -886,6 +886,27 @@
 	return rc;
 }
 EXPORT_SYMBOL(msm_rpm_send_message_noirq);
+
+/**
+ * During power collapse, the RPM driver masks the SMD interrupt to make
+ * sure it does not wake us from sleep.
+ */
+int msm_rpm_enter_sleep(void)
+{
+	return smd_mask_receive_interrupt(msm_rpm_data.ch_info, true);
+}
+EXPORT_SYMBOL(msm_rpm_enter_sleep);
+
+/**
+ * When the system resumes from power collapse, the SMD interrupt masked by
+ * msm_rpm_enter_sleep() has to be re-enabled to continue processing SMD
+ * messages.
+ */
+void msm_rpm_exit_sleep(void)
+{
+	smd_mask_receive_interrupt(msm_rpm_data.ch_info, false);
+}
+EXPORT_SYMBOL(msm_rpm_exit_sleep);
+
 static bool msm_rpm_set_standalone(void)
 {
 	if (machine_is_msm8974()) {
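A minimal sketch of how a low-power driver might bracket power collapse with the two helpers added above (the function name and call site are illustrative, not taken from this patch; the declarations come from rpm-notifier.h).

static int example_power_collapse(void)
{
	int ret;

	ret = msm_rpm_enter_sleep();	/* mask the SMD interrupt from RPM */
	if (ret)
		return ret;

	/* ... enter power collapse ... */

	msm_rpm_exit_sleep();		/* unmask and resume SMD processing */
	return 0;
}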
diff --git a/arch/arm/mach-msm/rpm_rbcpr_stats.c b/arch/arm/mach-msm/rpm_rbcpr_stats.c
new file mode 100644
index 0000000..7f27efc
--- /dev/null
+++ b/arch/arm/mach-msm/rpm_rbcpr_stats.c
@@ -0,0 +1,415 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/sort.h>
+#include <asm/uaccess.h>
+#include <mach/msm_iomap.h>
+#include "timer.h"
+#include "rpm_rbcpr_stats.h"
+
+#define RBCPR_USER_BUF		(2000)
+#define STR(a)  (#a)
+#define GETFIELD(a)     ((strnstr(STR(a), "->", 80) + 2))
+#define PRINTFIELD(buf, buf_size, pos, format, ...) \
+	((pos < buf_size) ? snprintf((buf + pos), (buf_size - pos), format,\
+		## __VA_ARGS__) : 0)
+
+enum {
+	RBCPR_CORNER_SVS = 0,
+	RBCPR_CORNER_NOMINAL,
+	RBCPR_CORNER_TURBO,
+	RBCPR_CORNERS_COUNT,
+	RBCPR_CORNER_INVALID = 0x7FFFFFFF,
+};
+
+struct msm_rpmrbcpr_recmnd {
+	uint32_t voltage;
+	uint32_t timestamp;
+};
+
+struct msm_rpmrbcpr_corners {
+	int efuse_adjustment;
+	struct msm_rpmrbcpr_recmnd *rpm_rcmnd;
+	uint32_t programmed_voltage;
+	uint32_t isr_counter;
+	uint32_t min_counter;
+	uint32_t max_counter;
+};
+
+struct msm_rpmrbcpr_stats {
+	uint32_t status_count;
+	uint32_t num_corners;
+	uint32_t num_latest_recommends;
+	struct msm_rpmrbcpr_corners *rbcpr_corners;
+	uint32_t current_corner;
+	uint32_t railway_voltage;
+	uint32_t enable;
+};
+
+struct msm_rpmrbcpr_stats_internal {
+	void __iomem *regbase;
+	uint32_t len;
+	char buf[RBCPR_USER_BUF];
+};
+
+static DEFINE_SPINLOCK(rpm_rbcpr_lock);
+static struct msm_rpmrbcpr_design_data rbcpr_design_data;
+static struct msm_rpmrbcpr_stats rbcpr_stats;
+static struct msm_rpmrbcpr_stats_internal pvtdata;
+
+static inline unsigned long msm_rpmrbcpr_read_data(void __iomem *regbase,
+						int offset)
+{
+	return readl_relaxed(regbase + (offset * 4));
+}
+
+static int msm_rpmrbcpr_cmp_func(const void *a, const void *b)
+{
+	struct msm_rpmrbcpr_recmnd *pa = (struct msm_rpmrbcpr_recmnd *)(a);
+	struct msm_rpmrbcpr_recmnd *pb = (struct msm_rpmrbcpr_recmnd *)(b);
+	return pa->timestamp - pb->timestamp;
+}
+
+static char *msm_rpmrbcpr_corner_string(uint32_t corner)
+{
+	switch (corner) {
+	case RBCPR_CORNER_SVS:
+		return STR(RBCPR_CORNER_SVS);
+		break;
+	case RBCPR_CORNER_NOMINAL:
+		return STR(RBCPR_CORNER_NOMINAL);
+		break;
+	case RBCPR_CORNER_TURBO:
+		return STR(RBCPR_CORNER_TURBO);
+		break;
+	case RBCPR_CORNERS_COUNT:
+	case RBCPR_CORNER_INVALID:
+	default:
+		return STR(RBCPR_CORNER_INVALID);
+		break;
+	}
+}
+
+static int msm_rpmrbcpr_print_buf(struct msm_rpmrbcpr_stats *pdata,
+				struct msm_rpmrbcpr_design_data *pdesdata,
+				char *buf)
+{
+	int pos = 0;
+	struct msm_rpmrbcpr_corners *corners;
+	struct msm_rpmrbcpr_recmnd *rcmnd;
+	int i, j;
+	int current_timestamp = msm_timer_get_sclk_ticks();
+
+	if (!pdata->enable) {
+		pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+				"RBCPR Stats not enabled at RPM");
+		return pos;
+	}
+
+	pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+				":RBCPR Platform Data");
+	pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+		" (%s: %u)", GETFIELD(pdesdata->upside_steps),
+				pdesdata->upside_steps);
+	pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+		" (%s:%u)", GETFIELD(pdesdata->downside_steps),
+				pdesdata->downside_steps);
+	pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+		" (%s: %d)", GETFIELD(pdesdata->svs_voltage),
+				pdesdata->svs_voltage);
+	pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+		" (%s: %d)", GETFIELD(pdesdata->nominal_voltage),
+				pdesdata->nominal_voltage);
+	pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+		" (%s: %d)\n", GETFIELD(pdesdata->turbo_voltage),
+				pdesdata->turbo_voltage);
+
+	pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+				":RBCPR Stats");
+	pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+		" (%s: %u)", GETFIELD(pdata->status_count),
+				pdata->status_count);
+	pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+		" (%s: %s)", GETFIELD(pdata->current_corner),
+			msm_rpmrbcpr_corner_string(pdata->current_corner));
+	pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+		" (current_timestamp: 0x%x)",
+				current_timestamp);
+	pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+		" (%s: %u)\n", GETFIELD(pdata->railway_voltage),
+			pdata->railway_voltage);
+
+	for (i = 0; i < pdata->num_corners; i++) {
+		corners = &pdata->rbcpr_corners[i];
+		pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+				":\tRBCPR Corner Data");
+		pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+			" (name: %s)", msm_rpmrbcpr_corner_string(i));
+		pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+			" (%s: %d)", GETFIELD(corners->efuse_adjustment),
+			corners->efuse_adjustment);
+		pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+			" (%s: %u)", GETFIELD(corners->programmed_voltage),
+						corners->programmed_voltage);
+		pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+			" (%s: %u)", GETFIELD(corners->isr_counter),
+						corners->isr_counter);
+		pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+			"(%s: %u)", GETFIELD(corners->min_counter),
+						corners->min_counter);
+		pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+			"(%s:%u)\n", GETFIELD(corners->max_counter),
+						corners->max_counter);
+		for (j = 0; j < pdata->num_latest_recommends; j++) {
+			pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+				":\t\tVoltage History[%d]", j);
+			rcmnd = &corners->rpm_rcmnd[j];
+			pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+			" (%s: %u)", GETFIELD(rcmnd->voltage),
+							rcmnd->voltage);
+			pos += PRINTFIELD(buf, RBCPR_USER_BUF, pos,
+			" (%s: 0x%x)\n", GETFIELD(rcmnd->timestamp),
+							rcmnd->timestamp);
+		}
+	}
+	return pos;
+}
+
+
+static void msm_rpmrbcpr_copy_data(struct msm_rpmrbcpr_stats_internal *pdata,
+					struct msm_rpmrbcpr_stats *prbcpr_stats)
+{
+	struct msm_rpmrbcpr_corners *corners;
+	struct msm_rpmrbcpr_recmnd *rcmnd;
+	int i, j;
+	int offset = (offsetof(struct msm_rpmrbcpr_stats, rbcpr_corners) / 4);
+
+	if (!prbcpr_stats)
+		return;
+
+	for (i = 0; i < prbcpr_stats->num_corners; i++) {
+		corners = &prbcpr_stats->rbcpr_corners[i];
+		corners->efuse_adjustment = msm_rpmrbcpr_read_data(
+					pdata->regbase, offset++);
+		for (j = 0; j < prbcpr_stats->num_latest_recommends; j++) {
+			rcmnd = &corners->rpm_rcmnd[j];
+			rcmnd->voltage = msm_rpmrbcpr_read_data(
+					pdata->regbase, offset++);
+			rcmnd->timestamp = msm_rpmrbcpr_read_data(
+					pdata->regbase, offset++);
+		}
+		sort(&corners->rpm_rcmnd[0],
+			prbcpr_stats->num_latest_recommends,
+			sizeof(struct msm_rpmrbcpr_recmnd),
+			msm_rpmrbcpr_cmp_func, NULL);
+		corners->programmed_voltage = msm_rpmrbcpr_read_data(
+					pdata->regbase, offset++);
+		corners->isr_counter = msm_rpmrbcpr_read_data(pdata->regbase,
+					offset++);
+		corners->min_counter = msm_rpmrbcpr_read_data(pdata->regbase,
+					offset++);
+		corners->max_counter = msm_rpmrbcpr_read_data(pdata->regbase,
+					offset++);
+	}
+	prbcpr_stats->current_corner = msm_rpmrbcpr_read_data(pdata->regbase,
+					offset++);
+	prbcpr_stats->railway_voltage = msm_rpmrbcpr_read_data
+				(pdata->regbase, offset++);
+	prbcpr_stats->enable = msm_rpmrbcpr_read_data(pdata->regbase, offset++);
+}
+
+static int msm_rpmrbcpr_file_read(struct file *file, char __user *bufu,
+				size_t count, loff_t *ppos)
+{
+	struct msm_rpmrbcpr_stats_internal *pdata = file->private_data;
+	int ret;
+	int status_counter;
+
+	if (!pdata) {
+		pr_info("%s pdata is null", __func__);
+		return -EINVAL;
+	}
+
+	if (!bufu || count < 0) {
+		pr_info("%s count %d ", __func__, count);
+		return -EINVAL;
+	}
+
+	if (*ppos > pdata->len || !pdata->len) {
+		/* Read RPM stats */
+		status_counter = readl_relaxed(pdata->regbase +
+			offsetof(struct msm_rpmrbcpr_stats, status_count));
+		if (status_counter != rbcpr_stats.status_count) {
+			spin_lock(&rpm_rbcpr_lock);
+			msm_rpmrbcpr_copy_data(pdata, &rbcpr_stats);
+			rbcpr_stats.status_count = status_counter;
+			spin_unlock(&rpm_rbcpr_lock);
+		}
+		pdata->len = msm_rpmrbcpr_print_buf(&rbcpr_stats,
+				&rbcpr_design_data, pdata->buf);
+		*ppos = 0;
+	}
+	/* copy to user data */
+	ret = simple_read_from_buffer(bufu, count, ppos, pdata->buf,
+					pdata->len);
+	return ret;
+}
+
+static void msm_rpmrbcpr_free_mem(struct msm_rpmrbcpr_stats_internal *pvtdata,
+				struct msm_rpmrbcpr_stats *prbcpr_stats)
+{
+	int i;
+	if (pvtdata->regbase)
+		iounmap(pvtdata->regbase);
+
+
+	if (prbcpr_stats) {
+		for (i = 0; i < prbcpr_stats->num_corners; i++) {
+			kfree(prbcpr_stats->rbcpr_corners[i].rpm_rcmnd);
+			prbcpr_stats->rbcpr_corners[i].rpm_rcmnd = NULL;
+		}
+
+		kfree(prbcpr_stats->rbcpr_corners);
+		prbcpr_stats->rbcpr_corners = NULL;
+	}
+}
+
+static int msm_rpmrbcpr_allocate_mem(struct msm_rpmrbcpr_platform_data *pdata,
+				struct resource *res)
+{
+	int i;
+
+	pvtdata.regbase = ioremap(res->start, (res->end - res->start + 1));
+	memcpy(&rbcpr_design_data, &pdata->rbcpr_data,
+			sizeof(struct msm_rpmrbcpr_design_data));
+
+
+	rbcpr_stats.num_corners = readl_relaxed(pvtdata.regbase +
+			offsetof(struct msm_rpmrbcpr_stats, num_corners));
+	rbcpr_stats.num_latest_recommends = readl_relaxed(pvtdata.regbase +
+			offsetof(struct msm_rpmrbcpr_stats,
+				num_latest_recommends));
+
+	rbcpr_stats.rbcpr_corners = kzalloc(
+				sizeof(struct msm_rpmrbcpr_corners)
+				* rbcpr_stats.num_corners, GFP_KERNEL);
+
+	if (!rbcpr_stats.rbcpr_corners) {
+		msm_rpmrbcpr_free_mem(&pvtdata, &rbcpr_stats);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < rbcpr_stats.num_corners; i++) {
+		rbcpr_stats.rbcpr_corners[i].rpm_rcmnd =
+			kzalloc(sizeof(struct msm_rpmrbcpr_recmnd)
+				* rbcpr_stats.num_latest_recommends,
+							GFP_KERNEL);
+
+		if (!rbcpr_stats.rbcpr_corners[i].rpm_rcmnd) {
+			msm_rpmrbcpr_free_mem(&pvtdata, &rbcpr_stats);
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static int msm_rpmrbcpr_file_open(struct inode *inode, struct file *file)
+{
+	file->private_data = &pvtdata;
+	pvtdata.len = 0;
+
+	if (!pvtdata.regbase)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int msm_rpmrbcpr_file_close(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations msm_rpmrbcpr_fops = {
+	.owner    = THIS_MODULE,
+	.open     = msm_rpmrbcpr_file_open,
+	.read     = msm_rpmrbcpr_file_read,
+	.release  = msm_rpmrbcpr_file_close,
+	.llseek   = no_llseek,
+};
+
+static  int __devinit msm_rpmrbcpr_probe(struct platform_device *pdev)
+{
+	struct dentry *dent;
+	struct msm_rpmrbcpr_platform_data *pdata;
+	int ret = 0;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata)
+		return -EINVAL;
+	dent = debugfs_create_file("rpm_rbcpr", S_IRUGO, NULL,
+	pdev->dev.platform_data, &msm_rpmrbcpr_fops);
+
+	if (!dent) {
+		pr_err("%s: ERROR debugfs_create_file failed\n", __func__);
+		return -ENOMEM;
+	}
+	platform_set_drvdata(pdev, dent);
+	ret = msm_rpmrbcpr_allocate_mem(pdata, pdev->resource);
+	return ret;
+}
+
+static int __devexit msm_rpmrbcpr_remove(struct platform_device *pdev)
+{
+	struct dentry *dent;
+
+	msm_rpmrbcpr_free_mem(&pvtdata, &rbcpr_stats);
+	dent = platform_get_drvdata(pdev);
+	debugfs_remove(dent);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static struct platform_driver msm_rpmrbcpr_driver = {
+	.probe  = msm_rpmrbcpr_probe,
+	.remove = __devexit_p(msm_rpmrbcpr_remove),
+	.driver = {
+		.name = "msm_rpm_rbcpr",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init msm_rpmrbcpr_init(void)
+{
+	return platform_driver_register(&msm_rpmrbcpr_driver);
+}
+
+static void __exit msm_rpmrbcpr_exit(void)
+{
+	platform_driver_unregister(&msm_rpmrbcpr_driver);
+}
+
+module_init(msm_rpmrbcpr_init);
+module_exit(msm_rpmrbcpr_exit);
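A short illustration of the two helper macros the driver above uses to build its debugfs text, with a hypothetical struct and value shown only to make the stringification visible: GETFIELD() stringifies the expression and strips everything up to and including "->", and PRINTFIELD() appends to the buffer only while space remains.

static void example_print(void)
{
	struct example { int railway_voltage; } e = { 1050 };
	struct example *p = &e;
	char buf[64];
	int pos = 0;

	pos += PRINTFIELD(buf, sizeof(buf), pos, "(%s: %d)",
			  GETFIELD(p->railway_voltage), p->railway_voltage);
	/* buf now holds "(railway_voltage: 1050)" */
}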
diff --git a/arch/arm/mach-msm/rpm_rbcpr_stats.h b/arch/arm/mach-msm/rpm_rbcpr_stats.h
new file mode 100644
index 0000000..55644d0
--- /dev/null
+++ b/arch/arm/mach-msm/rpm_rbcpr_stats.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_RBCPR_STATS_H
+#define __ARCH_ARM_MACH_MSM_RPM_RBCPR_STATS_H
+
+#include <linux/types.h>
+
+struct msm_rpmrbcpr_design_data {
+	u32 upside_steps;
+	u32 downside_steps;
+	int svs_voltage;
+	int nominal_voltage;
+	int turbo_voltage;
+};
+
+struct msm_rpmrbcpr_platform_data {
+	struct msm_rpmrbcpr_design_data rbcpr_data;
+};
+#endif
diff --git a/arch/arm/mach-msm/rpm_resources.c b/arch/arm/mach-msm/rpm_resources.c
index a88e42e..9d794e7 100644
--- a/arch/arm/mach-msm/rpm_resources.c
+++ b/arch/arm/mach-msm/rpm_resources.c
@@ -967,7 +967,7 @@
 			return rc;
 
 		if (msm_rpmrs_use_mpm(limits))
-			msm_mpm_enter_sleep(from_idle);
+			msm_mpm_enter_sleep(sclk_count, from_idle);
 	}
 
 	rc = msm_rpmrs_flush_L2(limits, notify_rpm);
diff --git a/arch/arm/mach-msm/scm-pas.h b/arch/arm/mach-msm/scm-pas.h
index 2fe71a9..dd24e20 100644
--- a/arch/arm/mach-msm/scm-pas.h
+++ b/arch/arm/mach-msm/scm-pas.h
@@ -19,7 +19,7 @@
 	PAS_TZAPPS,
 	PAS_MODEM_SW,
 	PAS_MODEM_FW,
-	PAS_RIVA,
+	PAS_WCNSS,
 	PAS_SECAPP,
 	PAS_GSS,
 	PAS_VIDC,
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
index 948dbbb..decee95 100644
--- a/arch/arm/mach-msm/smd.c
+++ b/arch/arm/mach-msm/smd.c
@@ -124,6 +124,7 @@
 	uint32_t out_bit_pos;
 	void __iomem *out_base;
 	uint32_t out_offset;
+	int irq_id;
 };
 
 struct interrupt_config {
@@ -2128,6 +2129,56 @@
 }
 EXPORT_SYMBOL(smd_disable_read_intr);
 
+/**
+ * Enable/disable receive interrupts for the remote processor used by a
+ * particular channel.
+ * @ch:      open channel handle to use for the edge
+ * @mask:    1 = mask interrupts; 0 = unmask interrupts
+ * @returns: 0 for success; < 0 for failure
+ *
+ * Note that this enables/disables all interrupts from the remote subsystem for
+ * all channels.  As such, it should be used with care and only for specific
+ * use cases such as power-collapse sequencing.
+ */
+int smd_mask_receive_interrupt(smd_channel_t *ch, bool mask)
+{
+	struct irq_chip *irq_chip;
+	struct irq_data *irq_data;
+	struct interrupt_config_item *int_cfg;
+
+	if (!ch)
+		return -EINVAL;
+
+	if (ch->type >= ARRAY_SIZE(edge_to_pids))
+		return -ENODEV;
+
+	int_cfg = &private_intr_config[edge_to_pids[ch->type].remote_pid].smd;
+
+	if (int_cfg->irq_id < 0)
+		return -ENODEV;
+
+	irq_chip = irq_get_chip(int_cfg->irq_id);
+	if (!irq_chip)
+		return -ENODEV;
+
+	irq_data = irq_get_irq_data(int_cfg->irq_id);
+	if (!irq_data)
+		return -ENODEV;
+
+	if (mask) {
+		SMx_POWER_INFO("SMD Masking interrupts from %s\n",
+				edge_to_pids[ch->type].subsys_name);
+		irq_chip->irq_mask(irq_data);
+	} else {
+		SMx_POWER_INFO("SMD Unmasking interrupts from %s\n",
+				edge_to_pids[ch->type].subsys_name);
+		irq_chip->irq_unmask(irq_data);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(smd_mask_receive_interrupt);
+
 int smd_wait_until_readable(smd_channel_t *ch, int bytes)
 {
 	return -1;
@@ -3246,8 +3297,10 @@
 			);
 	if (ret < 0) {
 		platform_irq->irq_id = ret;
+		private_irq->irq_id = ret;
 	} else {
 		platform_irq->irq_id = irq_id;
+		private_irq->irq_id = irq_id;
 		ret_wake = enable_irq_wake(irq_id);
 		if (ret_wake < 0) {
 			pr_err("smd: enable_irq_wake failed on %s",
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
index 817c2dc..39fbba8 100644
--- a/arch/arm/mach-msm/socinfo.c
+++ b/arch/arm/mach-msm/socinfo.c
@@ -628,11 +628,8 @@
 
 static void * __init setup_dummy_socinfo(void)
 {
-	if (machine_is_msm8960_rumi3() || machine_is_msm8960_sim() ||
-	    machine_is_msm8960_cdp())
+	if (machine_is_msm8960_cdp())
 		dummy_socinfo.id = 87;
-	else if (machine_is_apq8064_rumi3() || machine_is_apq8064_sim())
-		dummy_socinfo.id = 109;
 	else if (machine_is_msm9615_mtp() || machine_is_msm9615_cdp())
 		dummy_socinfo.id = 104;
 	else if (early_machine_is_msm8974()) {
@@ -757,9 +754,7 @@
 	if (!(read_cpuid_mpidr() & BIT(31)))
 		return 1;
 
-	if (read_cpuid_mpidr() & BIT(30) &&
-		!machine_is_msm8960_sim() &&
-		!machine_is_apq8064_sim())
+	if (read_cpuid_mpidr() & BIT(30))
 		return 1;
 
 	/* 1 + the PART[1:0] field of MIDR */
@@ -768,9 +763,6 @@
 
 const int read_msm_cpu_type(void)
 {
-	if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3())
-		return MSM_CPU_8960;
-
 	if (socinfo_get_msm_cpu() != MSM_CPU_UNKNOWN)
 		return socinfo_get_msm_cpu();
 
@@ -793,6 +785,10 @@
 	case 0x510F06F0:
 		return MSM_CPU_8064;
 
+	case 0x511F06F1:
+	case 0x512F06F0:
+		return MSM_CPU_8974;
+
 	default:
 		return MSM_CPU_UNKNOWN;
 	};
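For reference on the MPIDR checks retained above (hedged, per the ARM architecture manual rather than this patch): bit 31 set means the register uses the multiprocessing format, and bit 30 (the U bit) set means a uniprocessor implementation, which is why get_core_count() returns 1 in both early cases before falling back to 1 + MIDR PART[1:0]. A sketch of the same logic:

#include <linux/bitops.h>
#include <asm/cputype.h>

static inline unsigned int example_core_count(void)
{
	unsigned int mpidr = read_cpuid_mpidr();

	if (!(mpidr & BIT(31)))		/* legacy MPIDR format */
		return 1;
	if (mpidr & BIT(30))		/* U bit: uniprocessor */
		return 1;
	/* assumed to mirror the "1 + PART[1:0]" comment in get_core_count() */
	return ((read_cpuid_id() >> 4) & 3) + 1;
}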
diff --git a/arch/arm/mach-msm/wcnss-ssr-8960.c b/arch/arm/mach-msm/wcnss-ssr-8960.c
index 4295d9b0..318523b 100644
--- a/arch/arm/mach-msm/wcnss-ssr-8960.c
+++ b/arch/arm/mach-msm/wcnss-ssr-8960.c
@@ -132,6 +132,7 @@
 {
 	pil_force_shutdown("wcnss");
 	flush_delayed_work(&cancel_vote_work);
+	wcnss_flush_delayed_boot_votes();
 	disable_irq_nosync(RIVA_APSS_WDOG_BITE_RESET_RDY_IRQ);
 
 	return 0;
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 9107231..94b1e61 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -856,8 +856,10 @@
 	case 0x08000000:	/* ldm or stm, or thumb-2 32bit instruction */
 		if (thumb2_32b)
 			handler = do_alignment_t32_to_handler(&instr, regs, &offset);
-		else
+		else {
 			handler = do_alignment_ldmstm;
+			offset.un = 0;
+		}
 		break;
 
 	default:
diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c
index aed71c0..f54d820 100644
--- a/drivers/base/genlock.c
+++ b/drivers/base/genlock.c
@@ -116,6 +116,7 @@
 struct genlock *genlock_create_lock(struct genlock_handle *handle)
 {
 	struct genlock *lock;
+	void *ret;
 
 	if (IS_ERR_OR_NULL(handle)) {
 		GENLOCK_LOG_ERR("Invalid handle\n");
@@ -145,8 +146,13 @@
 	 * other processes
 	 */
 
-	lock->file = anon_inode_getfile("genlock", &genlock_fops,
-		lock, O_RDWR);
+	ret = anon_inode_getfile("genlock", &genlock_fops, lock, O_RDWR);
+	if (IS_ERR_OR_NULL(ret)) {
+		GENLOCK_LOG_ERR("Unable to create lock inode\n");
+		kfree(lock);
+		return ret;
+	}
+	lock->file = ret;
 
 	/* Attach the new lock to the handle */
 	handle->lock = lock;
@@ -660,12 +666,19 @@
 
 struct genlock_handle *genlock_get_handle(void)
 {
+	void *ret;
 	struct genlock_handle *handle = _genlock_get_handle();
 	if (IS_ERR(handle))
 		return handle;
 
-	handle->file = anon_inode_getfile("genlock-handle",
+	ret = anon_inode_getfile("genlock-handle",
 		&genlock_handle_fops, handle, O_RDWR);
+	if (IS_ERR_OR_NULL(ret)) {
+		GENLOCK_LOG_ERR("Unable to create handle inode\n");
+		kfree(handle);
+		return ret;
+	}
+	handle->file = ret;
 
 	return handle;
 }
diff --git a/drivers/bluetooth/hci_ibs.c b/drivers/bluetooth/hci_ibs.c
index 6845020..fb084f5 100644
--- a/drivers/bluetooth/hci_ibs.c
+++ b/drivers/bluetooth/hci_ibs.c
@@ -37,9 +37,7 @@
 #include <linux/fcntl.h>
 #include <linux/interrupt.h>
 #include <linux/ptrace.h>
-#include <linux/ftrace.h>
 #include <linux/poll.h>
-#include <linux/workqueue.h>
 
 #include <linux/slab.h>
 #include <linux/tty.h>
@@ -94,15 +92,6 @@
 	HCI_IBS_RX_VOTE_CLOCK_OFF,
 };
 
-/* HCI_IBS state for the WorkQueue */
-enum hci_ibs_wq_state_e {
-	HCI_IBS_WQ_INIT_STATE = 0,
-	HCI_IBS_WQ_TX_VOTE_OFF,
-	HCI_IBS_WQ_RX_VOTE_OFF,
-	HCI_IBS_WQ_AWAKE_RX,
-	HCI_IBS_WQ_AWAKE_DEVICE,
-};
-
 static unsigned long wake_retrans = 1;
 static unsigned long tx_idle_delay = (HZ * 2);
 
@@ -123,11 +112,6 @@
 	unsigned long rx_vote;		/* clock must be on for RX */
 	struct	timer_list tx_idle_timer;
 	struct	timer_list wake_retrans_timer;
-	struct	workqueue_struct *workqueue;
-	struct	work_struct ws_ibs;
-	unsigned long ibs_wq_state;
-	void *ibs_hu; /* keeps the hci_uart pointer for reference */
-
 	/* debug */
 	unsigned long ibs_sent_wacks;
 	unsigned long ibs_sent_slps;
@@ -258,56 +242,6 @@
 	return err;
 }
 
-static void ibs_wq(struct work_struct *work)
-{
-	unsigned long flags = 0;
-	struct ibs_struct *ibs = container_of(work, struct ibs_struct,
-						ws_ibs);
-	struct hci_uart *hu = (struct hci_uart *)ibs->ibs_hu;
-
-	BT_DBG("hu %p, ibs_wq state: %lu\n", hu, ibs->ibs_wq_state);
-
-	/* lock hci_ibs state */
-	spin_lock_irqsave(&ibs->hci_ibs_lock, flags);
-
-	switch (ibs->ibs_wq_state) {
-	case HCI_IBS_WQ_AWAKE_DEVICE:
-		/* Vote for serial clock */
-		ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
-
-		/* send wake indication to device */
-		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
-			BT_ERR("cannot send WAKE to device");
-
-		ibs->ibs_sent_wakes++; /* debug */
-
-		/* start retransmit timer */
-		mod_timer(&ibs->wake_retrans_timer, jiffies + wake_retrans);
-		break;
-	case HCI_IBS_WQ_AWAKE_RX:
-		ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
-		ibs->rx_ibs_state = HCI_IBS_RX_AWAKE;
-
-		if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0)
-			BT_ERR("cannot acknowledge device wake up");
-
-		ibs->ibs_sent_wacks++; /* debug */
-		/* actually send the packets */
-		hci_uart_tx_wakeup(hu);
-		break;
-	case HCI_IBS_WQ_RX_VOTE_OFF:
-		ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
-		break;
-	case HCI_IBS_WQ_TX_VOTE_OFF:
-		ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
-		break;
-	default:
-		BT_DBG("Invalid state in ibs workqueue");
-		break;
-	}
-	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
-}
-
 static void hci_ibs_tx_idle_timeout(unsigned long arg)
 {
 	struct hci_uart *hu = (struct hci_uart *) arg;
@@ -348,9 +282,7 @@
 
 	spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
 					flags, SINGLE_DEPTH_NESTING);
-	/* vote off tx clock */
-	ibs->ibs_wq_state = HCI_IBS_WQ_TX_VOTE_OFF;
-	queue_work(ibs->workqueue, &ibs->ws_ibs);
+	ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
 out:
 	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
 }
@@ -404,16 +336,6 @@
 	skb_queue_head_init(&ibs->txq);
 	skb_queue_head_init(&ibs->tx_wait_q);
 	spin_lock_init(&ibs->hci_ibs_lock);
-	ibs->workqueue = create_singlethread_workqueue("ibs_wq");
-	if (!ibs->workqueue) {
-		BT_ERR("IBS Workqueue not initialized properly");
-		kfree(ibs);
-		return -ENOMEM;
-	}
-
-	INIT_WORK(&ibs->ws_ibs, ibs_wq);
-	ibs->ibs_hu = (void *)hu;
-	ibs->ibs_wq_state = HCI_IBS_WQ_INIT_STATE;
 
 	/* Assume we start with both sides asleep -- extra wakes OK */
 	ibs->tx_ibs_state = HCI_IBS_TX_ASLEEP;
@@ -510,8 +432,6 @@
 	skb_queue_purge(&ibs->txq);
 	del_timer(&ibs->tx_idle_timer);
 	del_timer(&ibs->wake_retrans_timer);
-	destroy_workqueue(ibs->workqueue);
-	ibs->ibs_hu = NULL;
 
 	kfree_skb(ibs->rx_skb);
 
@@ -543,11 +463,9 @@
 		/* Make sure clock is on - we may have turned clock off since
 		 * receiving the wake up indicator
 		 */
-		/* awake rx clock */
-		ibs->ibs_wq_state = HCI_IBS_WQ_AWAKE_RX;
-		queue_work(ibs->workqueue, &ibs->ws_ibs);
-		spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
-		return;
+		ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
+		ibs->rx_ibs_state = HCI_IBS_RX_AWAKE;
+		/* deliberate fall-through */
 	case HCI_IBS_RX_AWAKE:
 		/* Always acknowledge device wake up,
 		 * sending IBS message doesn't count as TX ON.
@@ -592,9 +510,7 @@
 	case HCI_IBS_RX_AWAKE:
 		/* update state */
 		ibs->rx_ibs_state = HCI_IBS_RX_ASLEEP;
-		/* vote off rx clock under workqueue */
-		ibs->ibs_wq_state = HCI_IBS_WQ_RX_VOTE_OFF;
-		queue_work(ibs->workqueue, &ibs->ws_ibs);
+		ibs_msm_serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu);
 		break;
 	case HCI_IBS_RX_ASLEEP:
 		/* deliberate fall-through */
@@ -679,12 +595,20 @@
 
 	case HCI_IBS_TX_ASLEEP:
 		BT_DBG("device asleep, waking up and queueing packet");
+		ibs_msm_serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
 		/* save packet for later */
 		skb_queue_tail(&ibs->tx_wait_q, skb);
+		/* awake device */
+		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
+			BT_ERR("cannot send WAKE to device");
+			break;
+		}
+		ibs->ibs_sent_wakes++; /* debug */
+
+		/* start retransmit timer */
+		mod_timer(&ibs->wake_retrans_timer, jiffies + wake_retrans);
+
 		ibs->tx_ibs_state = HCI_IBS_TX_WAKING;
-		/* schedule a work queue to wake up device */
-		ibs->ibs_wq_state = HCI_IBS_WQ_AWAKE_DEVICE;
-		queue_work(ibs->workqueue, &ibs->ws_ibs);
 		break;
 
 	case HCI_IBS_TX_WAKING:
diff --git a/drivers/char/diag/diag_dci.h b/drivers/char/diag/diag_dci.h
index c0b82df..b70efe3 100644
--- a/drivers/char/diag/diag_dci.h
+++ b/drivers/char/diag/diag_dci.h
@@ -28,11 +28,6 @@
 	int signal_type;
 };
 
-#define DIAG_CON_APSS (0x0001)	/* Bit mask for APSS */
-#define DIAG_CON_MPSS (0x0002)	/* Bit mask for MPSS */
-#define DIAG_CON_LPASS (0x0004)	/* Bit mask for LPASS */
-#define DIAG_CON_WCNSS (0x0008)	/* Bit mask for WCNSS */
-
 enum {
 	DIAG_DCI_NO_ERROR = 1001,	/* No error */
 	DIAG_DCI_NO_REG,		/* Could not register */
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 2f356f0..95a85f2a 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -58,6 +58,11 @@
 #define DIAG_CTRL_MSG_F3_MASK	11
 #define CONTROL_CHAR	0x7E
 
+#define DIAG_CON_APSS (0x0001)	/* Bit mask for APSS */
+#define DIAG_CON_MPSS (0x0002)	/* Bit mask for MPSS */
+#define DIAG_CON_LPASS (0x0004)	/* Bit mask for LPASS */
+#define DIAG_CON_WCNSS (0x0008)	/* Bit mask for WCNSS */
+
 /* Maximum number of pkt reg supported at initialization*/
 extern unsigned int diag_max_reg;
 extern unsigned int diag_threshold_reg;
@@ -224,6 +229,9 @@
 	struct work_struct diag_qdsp_mask_update_work;
 	struct work_struct diag_wcnss_mask_update_work;
 	struct work_struct diag_read_smd_dci_work;
+	struct work_struct diag_clean_modem_reg_work;
+	struct work_struct diag_clean_lpass_reg_work;
+	struct work_struct diag_clean_wcnss_reg_work;
 	uint8_t *msg_masks;
 	uint8_t *log_masks;
 	int log_masks_length;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 547f42f..240a514 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1268,6 +1268,12 @@
 			diag_read_smd_wcnss_cntl_work_fn);
 		INIT_WORK(&(driver->diag_read_smd_dci_work),
 						 diag_read_smd_dci_work_fn);
+		INIT_WORK(&(driver->diag_clean_modem_reg_work),
+						 diag_clean_modem_reg_fn);
+		INIT_WORK(&(driver->diag_clean_lpass_reg_work),
+						 diag_clean_lpass_reg_fn);
+		INIT_WORK(&(driver->diag_clean_wcnss_reg_work),
+						 diag_clean_wcnss_reg_fn);
 		diag_debugfs_init();
 		diagfwd_init();
 		diagfwd_cntl_init();
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 384c1bf..b228276 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1735,8 +1735,8 @@
 static void diag_smd_notify(void *ctxt, unsigned event)
 {
 	if (event == SMD_EVENT_CLOSE) {
-		pr_info("diag: clean modem registration\n");
-		diag_clear_reg(MODEM_PROC);
+		queue_work(driver->diag_cntl_wq,
+			 &(driver->diag_clean_modem_reg_work));
 		driver->ch = 0;
 		return;
 	} else if (event == SMD_EVENT_OPEN) {
@@ -1750,8 +1750,8 @@
 static void diag_smd_qdsp_notify(void *ctxt, unsigned event)
 {
 	if (event == SMD_EVENT_CLOSE) {
-		pr_info("diag: clean lpass registration\n");
-		diag_clear_reg(QDSP_PROC);
+		queue_work(driver->diag_cntl_wq,
+			 &(driver->diag_clean_lpass_reg_work));
 		driver->chqdsp = 0;
 		return;
 	} else if (event == SMD_EVENT_OPEN) {
@@ -1765,8 +1765,8 @@
 static void diag_smd_wcnss_notify(void *ctxt, unsigned event)
 {
 	if (event == SMD_EVENT_CLOSE) {
-		pr_info("diag: clean wcnss registration\n");
-		diag_clear_reg(WCNSS_PROC);
+		queue_work(driver->diag_cntl_wq,
+			 &(driver->diag_clean_wcnss_reg_work));
 		driver->ch_wcnss = 0;
 		return;
 	} else if (event == SMD_EVENT_OPEN) {
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index de1a5b5..95abd21 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -20,9 +20,34 @@
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #endif
-
+/* tracks which peripheral is undergoing SSR */
+static uint16_t reg_dirty;
 #define HDR_SIZ 8
 
+void diag_clean_modem_reg_fn(struct work_struct *work)
+{
+	pr_debug("diag: clean modem registration\n");
+	reg_dirty |= DIAG_CON_MPSS;
+	diag_clear_reg(MODEM_PROC);
+	reg_dirty ^= DIAG_CON_MPSS;
+}
+
+void diag_clean_lpass_reg_fn(struct work_struct *work)
+{
+	pr_debug("diag: clean lpass registration\n");
+	reg_dirty |= DIAG_CON_LPASS;
+	diag_clear_reg(QDSP_PROC);
+	reg_dirty ^= DIAG_CON_LPASS;
+}
+
+void diag_clean_wcnss_reg_fn(struct work_struct *work)
+{
+	pr_debug("diag: clean wcnss registration\n");
+	reg_dirty |= DIAG_CON_WCNSS;
+	diag_clear_reg(WCNSS_PROC);
+	reg_dirty ^= DIAG_CON_WCNSS;
+}
+
 void diag_smd_cntl_notify(void *ctxt, unsigned event)
 {
 	int r1, r2;
@@ -105,6 +130,8 @@
 	struct bindpkt_params *temp;
 	void *buf = NULL;
 	smd_channel_t *smd_ch = NULL;
+	/* tracks which peripheral is sending registration */
+	uint16_t reg_mask = 0;
 
 	if (pkt_params == NULL) {
 		pr_alert("diag: Memory allocation failure\n");
@@ -114,12 +141,15 @@
 	if (proc_num == MODEM_PROC) {
 		buf = driver->buf_in_cntl;
 		smd_ch = driver->ch_cntl;
+		reg_mask = DIAG_CON_MPSS;
 	} else if (proc_num == QDSP_PROC) {
 		buf = driver->buf_in_qdsp_cntl;
 		smd_ch = driver->chqdsp_cntl;
+		reg_mask = DIAG_CON_LPASS;
 	} else if (proc_num == WCNSS_PROC) {
 		buf = driver->buf_in_wcnss_cntl;
 		smd_ch = driver->ch_wcnss_cntl;
+		reg_mask = DIAG_CON_WCNSS;
 	}
 
 	if (!smd_ch || !buf) {
@@ -180,8 +210,16 @@
 				temp -= pkt_params->count;
 				pkt_params->params = temp;
 				flag = 1;
-				diagchar_ioctl(NULL, DIAG_IOCTL_COMMAND_REG,
-						 (unsigned long)pkt_params);
+				/* a peripheral undergoing SSR should not
+				 * record new registrations
+				 */
+				if (!(reg_dirty & reg_mask))
+					diagchar_ioctl(NULL,
+					 DIAG_IOCTL_COMMAND_REG, (unsigned long)
+								pkt_params);
+				else
+					pr_err("diag: drop reg proc %d\n",
+								 proc_num);
 				kfree(temp);
 			}
 			buf = buf + HDR_SIZ + data_len;
@@ -275,6 +313,7 @@
 
 void diagfwd_cntl_init(void)
 {
+	reg_dirty = 0;
 	driver->polling_reg_flag = 0;
 	driver->diag_cntl_wq = create_singlethread_workqueue("diag_cntl_wq");
 	if (driver->buf_in_cntl == NULL) {
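The reg_dirty handshake introduced above, as a hedged one-place sketch: the SSR cleanup worker marks a peripheral dirty while its table is being cleared, and the control-channel parser drops any registration arriving for a dirty peripheral in the meantime. Clearing with &= ~mask is the more conventional idiom than ^=, although the two behave the same here because the bit is known to be set.

static void example_clean_reg(uint16_t mask, int proc)
{
	reg_dirty |= mask;	/* registrations from proc are now stale */
	diag_clear_reg(proc);
	reg_dirty &= ~mask;	/* accept fresh registrations again */
}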
diff --git a/drivers/char/diag/diagfwd_cntl.h b/drivers/char/diag/diagfwd_cntl.h
index 743ddc1..59e5e6b 100644
--- a/drivers/char/diag/diagfwd_cntl.h
+++ b/drivers/char/diag/diagfwd_cntl.h
@@ -85,7 +85,9 @@
 void diag_smd_cntl_notify(void *ctxt, unsigned event);
 void diag_smd_qdsp_cntl_notify(void *ctxt, unsigned event);
 void diag_smd_wcnss_cntl_notify(void *ctxt, unsigned event);
-
+void diag_clean_modem_reg_fn(struct work_struct *);
+void diag_clean_lpass_reg_fn(struct work_struct *);
+void diag_clean_wcnss_reg_fn(struct work_struct *);
 void diag_debugfs_init(void);
 void diag_debugfs_cleanup(void);
 
diff --git a/drivers/coresight/coresight-tpiu.c b/drivers/coresight/coresight-tpiu.c
index 4b52c4d..c0bcfdd 100644
--- a/drivers/coresight/coresight-tpiu.c
+++ b/drivers/coresight/coresight-tpiu.c
@@ -119,9 +119,17 @@
 	if (ret)
 		goto err_clk_rate;
 
+	/* Disable tpiu to support older targets that need this */
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		goto err_clk_enable;
+	__tpiu_disable();
+	clk_disable_unprepare(drvdata->clk);
+
 	dev_info(drvdata->dev, "TPIU initialized\n");
 	return 0;
 
+err_clk_enable:
 err_clk_rate:
 	clk_put(drvdata->clk);
 err_clk_get:
diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c
index 055ef55..a17ac9a 100644
--- a/drivers/coresight/coresight.c
+++ b/drivers/coresight/coresight.c
@@ -20,71 +20,351 @@
 #include <linux/err.h>
 #include <linux/export.h>
 #include <linux/slab.h>
-#include <linux/mutex.h>
+#include <linux/semaphore.h>
 #include <linux/clk.h>
 #include <linux/coresight.h>
 
 #include "coresight-priv.h"
 
+
+#define NO_SINK		(-1)
 #define MAX_STR_LEN	(65535)
 
 
+static int curr_sink = NO_SINK;
 static LIST_HEAD(coresight_orph_conns);
-static DEFINE_MUTEX(coresight_conns_mutex);
 static LIST_HEAD(coresight_devs);
-static DEFINE_MUTEX(coresight_devs_mutex);
+static DEFINE_SEMAPHORE(coresight_mutex);
 
 
-int coresight_enable(struct coresight_device *csdev, int port)
+static int coresight_find_link_inport(struct coresight_device *csdev)
 {
 	int i;
-	int ret;
+	struct coresight_device *parent;
 	struct coresight_connection *conn;
 
-	mutex_lock(&csdev->mutex);
-	if (csdev->refcnt[port] == 0) {
-		for (i = 0; i < csdev->nr_conns; i++) {
-			conn = &csdev->conns[i];
-			ret = coresight_enable(conn->child_dev,
-					       conn->child_port);
-			if (ret)
-				goto err_enable_child;
-		}
-		if (csdev->ops->enable)
-			ret = csdev->ops->enable(csdev, port);
-		if (ret)
-			goto err_enable;
+	parent = container_of(csdev->path_link.next, struct coresight_device,
+			     path_link);
+	for (i = 0; i < parent->nr_conns; i++) {
+		conn = &parent->conns[i];
+		if (conn->child_dev == csdev)
+			return conn->child_port;
 	}
-	csdev->refcnt[port]++;
-	mutex_unlock(&csdev->mutex);
+
+	pr_err("coresight: couldn't find inport, parent: %d, child: %d\n",
+	       parent->id, csdev->id);
 	return 0;
-err_enable_child:
-	while (i) {
-		conn = &csdev->conns[--i];
-		coresight_disable(conn->child_dev, conn->child_port);
+}
+
+static int coresight_find_link_outport(struct coresight_device *csdev)
+{
+	int i;
+	struct coresight_device *child;
+	struct coresight_connection *conn;
+
+	child = container_of(csdev->path_link.prev, struct coresight_device,
+			      path_link);
+	for (i = 0; i < csdev->nr_conns; i++) {
+		conn = &csdev->conns[i];
+		if (conn->child_dev == child)
+			return conn->outport;
 	}
-err_enable:
-	mutex_unlock(&csdev->mutex);
+
+	pr_err("coresight: couldn't find outport, parent: %d, child: %d\n",
+	       csdev->id, child->id);
+	return 0;
+}
+
+static int coresight_enable_sink(struct coresight_device *csdev)
+{
+	int ret;
+
+	if (csdev->refcnt.sink_refcnt == 0) {
+		if (csdev->ops->sink_ops->enable) {
+			ret = csdev->ops->sink_ops->enable(csdev);
+			if (ret)
+				goto err;
+			csdev->enable = true;
+		}
+	}
+	csdev->refcnt.sink_refcnt++;
+
+	return 0;
+err:
+	return ret;
+}
+
+static void coresight_disable_sink(struct coresight_device *csdev)
+{
+	if (csdev->refcnt.sink_refcnt == 1) {
+		if (csdev->ops->sink_ops->disable) {
+			csdev->ops->sink_ops->disable(csdev);
+			csdev->enable = false;
+		}
+	}
+	csdev->refcnt.sink_refcnt--;
+}
+
+static int coresight_enable_link(struct coresight_device *csdev)
+{
+	int ret;
+	int link_subtype;
+	int refport, inport, outport;
+
+	inport = coresight_find_link_inport(csdev);
+	outport = coresight_find_link_outport(csdev);
+
+	link_subtype = csdev->subtype.link_subtype;
+	if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+		refport = inport;
+	else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+		refport = outport;
+	else
+		refport = 0;
+
+	if (csdev->refcnt.link_refcnts[refport] == 0) {
+		if (csdev->ops->link_ops->enable) {
+			ret = csdev->ops->link_ops->enable(csdev, inport,
+							   outport);
+			if (ret)
+				goto err;
+			csdev->enable = true;
+		}
+	}
+	csdev->refcnt.link_refcnts[refport]++;
+
+	return 0;
+err:
+	return ret;
+}
+
+static void coresight_disable_link(struct coresight_device *csdev)
+{
+	int link_subtype;
+	int refport, inport, outport;
+
+	inport = coresight_find_link_inport(csdev);
+	outport = coresight_find_link_outport(csdev);
+
+	link_subtype = csdev->subtype.link_subtype;
+	if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+		refport = inport;
+	else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+		refport = outport;
+	else
+		refport = 0;
+
+	if (csdev->refcnt.link_refcnts[refport] == 1) {
+		if (csdev->ops->link_ops->disable) {
+			csdev->ops->link_ops->disable(csdev, inport, outport);
+			csdev->enable = false;
+		}
+	}
+	csdev->refcnt.link_refcnts[refport]--;
+}
+
+static int coresight_enable_source(struct coresight_device *csdev)
+{
+	int ret;
+
+	if (csdev->refcnt.source_refcnt == 0) {
+		if (csdev->ops->source_ops->enable) {
+			ret = csdev->ops->source_ops->enable(csdev);
+			if (ret)
+				goto err;
+			csdev->enable = true;
+		}
+	}
+	csdev->refcnt.source_refcnt++;
+
+	return 0;
+err:
+	return ret;
+}
+
+static void coresight_disable_source(struct coresight_device *csdev)
+{
+	if (csdev->refcnt.source_refcnt == 1) {
+		if (csdev->ops->source_ops->disable) {
+			csdev->ops->source_ops->disable(csdev);
+			csdev->enable = false;
+		}
+	}
+	csdev->refcnt.source_refcnt--;
+}
+
+static struct list_head *coresight_build_path(struct coresight_device *csdev,
+					      struct list_head *path)
+{
+	int i;
+	struct list_head *p;
+	struct coresight_connection *conn;
+
+	if (csdev->id == curr_sink) {
+		list_add_tail(&csdev->path_link, path);
+		return path;
+	}
+
+	for (i = 0; i < csdev->nr_conns; i++) {
+		conn = &csdev->conns[i];
+		p = coresight_build_path(conn->child_dev, path);
+		if (p) {
+			list_add_tail(&csdev->path_link, p);
+			return p;
+		}
+	}
+	return NULL;
+}
+
+static void coresight_release_path(struct list_head *path)
+{
+	struct coresight_device *cd, *temp;
+
+	list_for_each_entry_safe(cd, temp, path, path_link)
+		list_del(&cd->path_link);
+}
+
+static int coresight_enable_path(struct list_head *path, bool incl_source)
+{
+	int ret = 0;
+	struct coresight_device *cd;
+
+	list_for_each_entry(cd, path, path_link) {
+		if (cd == list_first_entry(path, struct coresight_device,
+					   path_link)) {
+			ret = coresight_enable_sink(cd);
+		} else if (list_is_last(&cd->path_link, path)) {
+			if (incl_source)
+				ret = coresight_enable_source(cd);
+		} else {
+			ret = coresight_enable_link(cd);
+		}
+		if (ret)
+			goto err;
+	}
+	return 0;
+err:
+	list_for_each_entry_continue_reverse(cd, path, path_link) {
+		if (cd == list_first_entry(path, struct coresight_device,
+					   path_link)) {
+			coresight_disable_sink(cd);
+		} else if (list_is_last(&cd->path_link, path)) {
+			if (incl_source)
+				coresight_disable_source(cd);
+		} else {
+			coresight_disable_link(cd);
+		}
+	}
+	return ret;
+}
+
+static void coresight_disable_path(struct list_head *path, bool incl_source)
+{
+	struct coresight_device *cd;
+
+	list_for_each_entry(cd, path, path_link) {
+		if (cd == list_first_entry(path, struct coresight_device,
+					   path_link)) {
+			coresight_disable_sink(cd);
+		} else if (list_is_last(&cd->path_link, path)) {
+			if (incl_source)
+				coresight_disable_source(cd);
+		} else {
+			coresight_disable_link(cd);
+		}
+	}
+}
+
+static int coresight_switch_sink(struct coresight_device *csdev)
+{
+	int ret = 0;
+	LIST_HEAD(path);
+	struct coresight_device *cd;
+
+	if (IS_ERR_OR_NULL(csdev))
+		return -EINVAL;
+
+	down(&coresight_mutex);
+	if (csdev->id == curr_sink)
+		goto out;
+
+	list_for_each_entry(cd, &coresight_devs, dev_link) {
+		if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable) {
+			coresight_build_path(cd, &path);
+			coresight_disable_path(&path, false);
+			coresight_release_path(&path);
+		}
+	}
+	curr_sink = csdev->id;
+	list_for_each_entry(cd, &coresight_devs, dev_link) {
+		if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable) {
+			coresight_build_path(cd, &path);
+			ret = coresight_enable_path(&path, false);
+			coresight_release_path(&path);
+			if (ret)
+				goto err;
+		}
+	}
+out:
+	up(&coresight_mutex);
+	return 0;
+err:
+	list_for_each_entry(cd, &coresight_devs, dev_link) {
+		if (cd->type == CORESIGHT_DEV_TYPE_SOURCE && cd->enable)
+			coresight_disable_source(cd);
+	}
+	pr_err("coresight: sink switch failed, sources disabled; try again\n");
+	return ret;
+}
+
+int coresight_enable(struct coresight_device *csdev)
+{
+	int ret = 0;
+	LIST_HEAD(path);
+
+	if (IS_ERR_OR_NULL(csdev))
+		return -EINVAL;
+
+	down(&coresight_mutex);
+	if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
+		ret = -EINVAL;
+		pr_err("coresight: wrong device type in %s\n", __func__);
+		goto out;
+	}
+	if (csdev->enable)
+		goto out;
+
+	coresight_build_path(csdev, &path);
+	ret = coresight_enable_path(&path, true);
+	coresight_release_path(&path);
+	if (ret)
+		pr_err("coresight: enable failed\n");
+out:
+	up(&coresight_mutex);
 	return ret;
 }
 EXPORT_SYMBOL(coresight_enable);
 
-void coresight_disable(struct coresight_device *csdev, int port)
+void coresight_disable(struct coresight_device *csdev)
 {
-	int i;
-	struct coresight_connection *conn;
+	LIST_HEAD(path);
 
-	mutex_lock(&csdev->mutex);
-	if (csdev->refcnt[port] == 1) {
-		if (csdev->ops->disable)
-			csdev->ops->disable(csdev, port);
-		for (i = 0; i < csdev->nr_conns; i++) {
-			conn = &csdev->conns[i];
-			coresight_disable(conn->child_dev, conn->child_port);
-		}
+	if (IS_ERR_OR_NULL(csdev))
+		return;
+
+	down(&coresight_mutex);
+	if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
+		pr_err("coresight: wrong device type in %s\n", __func__);
+		goto out;
 	}
-	csdev->refcnt[port]--;
-	mutex_unlock(&csdev->mutex);
+	if (!csdev->enable)
+		goto out;
+
+	coresight_build_path(csdev, &path);
+	coresight_disable_path(&path, true);
+	coresight_release_path(&path);
+out:
+	up(&coresight_mutex);
 }
 EXPORT_SYMBOL(coresight_disable);
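With the path-based model introduced above, kernel clients enable or disable only source devices; the links and the currently selected sink along the path are brought up and torn down implicitly. A hedged sketch of a caller:

static int example_trace_session(struct coresight_device *etm_csdev)
{
	int ret;

	ret = coresight_enable(etm_csdev);	/* walks the path to curr_sink */
	if (ret)
		return ret;

	/* ... run the workload being traced ... */

	coresight_disable(etm_csdev);		/* tears the path back down */
	return 0;
}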
 
@@ -104,6 +384,39 @@
 	.dev_attrs	= coresight_dev_attrs,
 };
 
+static ssize_t coresight_show_curr_sink(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct coresight_device *csdev = to_coresight_device(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 csdev->id == curr_sink ? 1 : 0);
+}
+
+static ssize_t coresight_store_curr_sink(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf, size_t size)
+{
+	int ret = 0;
+	unsigned long val;
+	struct coresight_device *csdev = to_coresight_device(dev);
+
+	if (sscanf(buf, "%lx", &val) != 1)
+		return -EINVAL;
+
+	if (val)
+		ret = coresight_switch_sink(csdev);
+	else
+		ret = -EINVAL;
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(curr_sink, S_IRUGO | S_IWUSR, coresight_show_curr_sink,
+		   coresight_store_curr_sink);
+
 static ssize_t coresight_show_enable(struct device *dev,
 				     struct device_attribute *attr, char *buf)
 {
@@ -124,9 +437,9 @@
 		return -EINVAL;
 
 	if (val)
-		ret = coresight_enable(csdev, 0);
+		ret = coresight_enable(csdev);
 	else
-		coresight_disable(csdev, 0);
+		coresight_disable(csdev);
 
 	if (ret)
 		return ret;
@@ -135,38 +448,55 @@
 static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, coresight_show_enable,
 		   coresight_store_enable);
 
-static struct attribute *coresight_attrs[] = {
+static struct attribute *coresight_attrs_sink[] = {
+	&dev_attr_curr_sink.attr,
+	NULL,
+};
+
+static struct attribute_group coresight_attr_grp_sink = {
+	.attrs = coresight_attrs_sink,
+};
+
+static const struct attribute_group *coresight_attr_grps_sink[] = {
+	&coresight_attr_grp_sink,
+	NULL,
+};
+
+static struct attribute *coresight_attrs_source[] = {
 	&dev_attr_enable.attr,
 	NULL,
 };
 
-static struct attribute_group coresight_attr_grp = {
-	.attrs = coresight_attrs,
+static struct attribute_group coresight_attr_grp_source = {
+	.attrs = coresight_attrs_source,
 };
 
-static const struct attribute_group *coresight_attr_grps[] = {
-	&coresight_attr_grp,
+static const struct attribute_group *coresight_attr_grps_source[] = {
+	&coresight_attr_grp_source,
 	NULL,
 };
 
-static struct device_type coresight_dev_type[CORESIGHT_DEV_TYPE_MAX] = {
+static struct device_type coresight_dev_type[] = {
 	{
-		.name = "source",
-		.groups = coresight_attr_grps,
+		.name = "sink",
+		.groups = coresight_attr_grps_sink,
 	},
 	{
 		.name = "link",
 	},
 	{
-		.name = "sink",
-		.groups = coresight_attr_grps,
+		.name = "linksink",
+		.groups = coresight_attr_grps_sink,
+	},
+	{
+		.name = "source",
+		.groups = coresight_attr_grps_source,
 	},
 };
 
 static void coresight_device_release(struct device *dev)
 {
 	struct coresight_device *csdev = to_coresight_device(dev);
-	mutex_destroy(&csdev->mutex);
 	kfree(csdev);
 }
 
@@ -174,14 +504,12 @@
 {
 	struct coresight_connection *conn, *temp;
 
-	mutex_lock(&coresight_conns_mutex);
 	list_for_each_entry_safe(conn, temp, &coresight_orph_conns, link) {
 		if (conn->child_id == csdev->id) {
 			conn->child_dev = csdev;
 			list_del(&conn->link);
 		}
 	}
-	mutex_unlock(&coresight_conns_mutex);
 }
 
 static void coresight_fixup_device_conns(struct coresight_device *csdev)
@@ -192,21 +520,16 @@
 
 	for (i = 0; i < csdev->nr_conns; i++) {
 		found = false;
-		mutex_lock(&coresight_devs_mutex);
-		list_for_each_entry(cd, &coresight_devs, link) {
+		list_for_each_entry(cd, &coresight_devs, dev_link) {
 			if (csdev->conns[i].child_id == cd->id) {
 				csdev->conns[i].child_dev = cd;
 				found = true;
 				break;
 			}
 		}
-		mutex_unlock(&coresight_devs_mutex);
-		if (!found) {
-			mutex_lock(&coresight_conns_mutex);
+		if (!found)
 			list_add_tail(&csdev->conns[i].link,
 				      &coresight_orph_conns);
-			mutex_unlock(&coresight_conns_mutex);
-		}
 	}
 }
 
@@ -214,7 +537,9 @@
 {
 	int i;
 	int ret;
-	int *refcnt;
+	int link_subtype;
+	int nr_refcnts;
+	int *refcnts = NULL;
 	struct coresight_device *csdev;
 	struct coresight_connection *conns;
 
@@ -224,28 +549,41 @@
 		goto err_kzalloc_csdev;
 	}
 
-	mutex_init(&csdev->mutex);
 	csdev->id = desc->pdata->id;
 
-	refcnt = kzalloc(sizeof(*refcnt) * desc->pdata->nr_ports, GFP_KERNEL);
-	if (!refcnt) {
-		ret = -ENOMEM;
-		goto err_kzalloc_refcnt;
-	}
-	csdev->refcnt = refcnt;
+	if (desc->type == CORESIGHT_DEV_TYPE_LINK ||
+	    desc->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+		link_subtype = desc->subtype.link_subtype;
+		if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+			nr_refcnts = desc->pdata->nr_inports;
+		else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+			nr_refcnts = desc->pdata->nr_outports;
+		else
+			nr_refcnts = 1;
 
-	csdev->nr_conns = desc->pdata->nr_children;
+		refcnts = kzalloc(sizeof(*refcnts) * nr_refcnts, GFP_KERNEL);
+		if (!refcnts) {
+			ret = -ENOMEM;
+			goto err_kzalloc_refcnts;
+		}
+		csdev->refcnt.link_refcnts = refcnts;
+	}
+
+	csdev->nr_conns = desc->pdata->nr_outports;
 	conns = kzalloc(sizeof(*conns) * csdev->nr_conns, GFP_KERNEL);
 	if (!conns) {
 		ret = -ENOMEM;
 		goto err_kzalloc_conns;
 	}
 	for (i = 0; i < csdev->nr_conns; i++) {
+		conns[i].outport = desc->pdata->outports[i];
 		conns[i].child_id = desc->pdata->child_ids[i];
 		conns[i].child_port = desc->pdata->child_ports[i];
 	}
 	csdev->conns = conns;
 
+	csdev->type = desc->type;
+	csdev->subtype = desc->subtype;
 	csdev->ops = desc->ops;
 	csdev->owner = desc->owner;
 
@@ -256,24 +594,34 @@
 	csdev->dev.release = coresight_device_release;
 	dev_set_name(&csdev->dev, "%s", desc->pdata->name);
 
+	down(&coresight_mutex);
+	if (desc->pdata->default_sink) {
+		if (curr_sink == NO_SINK) {
+			curr_sink = csdev->id;
+		} else {
+			ret = -EINVAL;
+			goto err_default_sink;
+		}
+	}
+
 	coresight_fixup_device_conns(csdev);
 	ret = device_register(&csdev->dev);
 	if (ret)
 		goto err_dev_reg;
 	coresight_fixup_orphan_conns(csdev);
 
-	mutex_lock(&coresight_devs_mutex);
-	list_add_tail(&csdev->link, &coresight_devs);
-	mutex_unlock(&coresight_devs_mutex);
+	list_add_tail(&csdev->dev_link, &coresight_devs);
+	up(&coresight_mutex);
 
 	return csdev;
 err_dev_reg:
 	put_device(&csdev->dev);
+err_default_sink:
+	up(&coresight_mutex);
 	kfree(conns);
 err_kzalloc_conns:
-	kfree(refcnt);
-err_kzalloc_refcnt:
-	mutex_destroy(&csdev->mutex);
+	kfree(refcnts);
+err_kzalloc_refcnts:
 	kfree(csdev);
 err_kzalloc_csdev:
 	return ERR_PTR(ret);
@@ -286,9 +634,7 @@
 		return;
 
 	if (get_device(&csdev->dev)) {
-		mutex_lock(&csdev->mutex);
 		device_unregister(&csdev->dev);
-		mutex_unlock(&csdev->mutex);
 		put_device(&csdev->dev);
 	}
 }
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index fb1ffd0..4657c37 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -288,6 +288,9 @@
 config CRYPTO_DEV_QCE40
 	bool
 
+config CRYPTO_DEV_QCE50
+	bool
+
 config CRYPTO_DEV_QCRYPTO
 	tristate "Qualcomm Crypto accelerator"
 	select CRYPTO_DES
@@ -303,20 +306,22 @@
 config CRYPTO_DEV_QCE
 	tristate "Qualcomm Crypto Engine (QCE) module"
 	select  CRYPTO_DEV_QCE40 if ARCH_MSM8960 || ARCH_MSM9615
+	select  CRYPTO_DEV_QCE50 if ARCH_MSM8974
 	default n
 	help
           This driver supports Qualcomm Crypto Engine in MSM7x30, MSM8660
 	  MSM8x55, MSM8960 and MSM9615
 	  To compile this driver as a module, choose M here: the
 	  For MSM7x30 MSM8660 and MSM8x55 the module is called qce
-	  For MSM8960 and MSM9615 the module is called qce40
+	  For MSM8960, APQ8064 and MSM9615 the module is called qce40
+	  For MSM8974 the module is called qce50
 
 config CRYPTO_DEV_QCEDEV
 	tristate "QCEDEV Interface to CE module"
 	default n
 	help
           This driver supports Qualcomm QCEDEV Crypto in MSM7x30, MSM8660,
-          MSM8960 and MSM9615.
+          MSM8960, MSM9615, APQ8064 and MSM8974.
           This exposes the interface to the QCE hardware accelerator via IOCTLs
 	  To compile this driver as a module, choose M here: the
 	  module will be called qcedev.
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
index 61406b9..df9acf2 100644
--- a/drivers/crypto/msm/Makefile
+++ b/drivers/crypto/msm/Makefile
@@ -1,8 +1,12 @@
 obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
-ifeq ($(CONFIG_CRYPTO_DEV_QCE40), y)
-	obj-$(CONFIG_CRYPTO_DEV_QCE) += qce40.o
+ifeq ($(CONFIG_CRYPTO_DEV_QCE50), y)
+	obj-$(CONFIG_CRYPTO_DEV_QCE) += qce50.o
 else
-	obj-$(CONFIG_CRYPTO_DEV_QCE) += qce.o
+	ifeq ($(CONFIG_CRYPTO_DEV_QCE40), y)
+		obj-$(CONFIG_CRYPTO_DEV_QCE) += qce40.o
+	else
+		obj-$(CONFIG_CRYPTO_DEV_QCE) += qce.o
+	endif
 endif
 obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
 obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c
index 55cf651..4a9729c 100644
--- a/drivers/crypto/msm/qce.c
+++ b/drivers/crypto/msm/qce.c
@@ -1,6 +1,6 @@
 /* Qualcomm Crypto Engine driver.
  *
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -2463,6 +2463,8 @@
 	ce_support->aes_xts  = false;
 	ce_support->aes_ccm  = false;
 	ce_support->ota = pce_dev->ota;
+	ce_support->aligned_only = false;
+	ce_support->bam = false;
 	return 0;
 }
 EXPORT_SYMBOL(qce_hw_support);
@@ -2703,7 +2705,5 @@
 EXPORT_SYMBOL(qce_f9_req);
 
 MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
 MODULE_DESCRIPTION("Crypto Engine driver");
-MODULE_VERSION("1.15");
 
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
index edd2089..037861c 100644
--- a/drivers/crypto/msm/qce.h
+++ b/drivers/crypto/msm/qce.h
@@ -1,6 +1,6 @@
 /* Qualcomm Crypto Engine driver API
  *
- * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -71,6 +71,7 @@
 	QCE_HASH_SHA1_HMAC   = 2,
 	QCE_HASH_SHA256_HMAC = 3,
 	QCE_HASH_AES_CMAC = 4,
+	QCE_AEAD_SHA1_HMAC = 5,
 	QCE_HASH_LAST
 };
 
@@ -110,6 +111,8 @@
 	bool aes_xts;
 	bool aes_ccm;
 	bool ota;
+	bool aligned_only;
+	bool bam;
 };
 
 /* Sha operation parameters */
diff --git a/drivers/crypto/msm/qce40.c b/drivers/crypto/msm/qce40.c
index c203fc5..7a229a5c 100644
--- a/drivers/crypto/msm/qce40.c
+++ b/drivers/crypto/msm/qce40.c
@@ -2599,11 +2599,11 @@
 	ce_support->aes_xts = true;
 	ce_support->aes_ccm = true;
 	ce_support->ota = false;
+	ce_support->aligned_only = false;
+	ce_support->bam = false;
 	return 0;
 }
 EXPORT_SYMBOL(qce_hw_support);
 
 MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
 MODULE_DESCRIPTION("Crypto Engine driver");
-MODULE_VERSION("2.17");
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
new file mode 100644
index 0000000..4ccd89d
--- /dev/null
+++ b/drivers/crypto/msm/qce50.c
@@ -0,0 +1,2739 @@
+/* Qualcomm Crypto Engine driver.
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/qcedev.h>
+#include <linux/bitops.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <mach/dma.h>
+#include <mach/clk.h>
+#include <mach/socinfo.h>
+
+#include "qce.h"
+#include "qce50.h"
+#include "qcryptohw_50.h"
+
+#define CRYPTO_CONFIG_RESET 0xE001F
+
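+/*
+ * The crypto BAM is shared by all CE engine instances. It is registered
+ * with the SPS driver only once and reference counted in bam_registry;
+ * bam_register_cnt serializes registration and deregistration.
+ */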
+static DEFINE_MUTEX(bam_register_cnt);
+struct bam_registration_info {
+	uint32_t handle;
+	uint32_t cnt;
+};
+static struct bam_registration_info bam_registry;
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at a time. It is up to
+ * the software above to ensure single threading of operations on an engine.
+ */
+struct qce_device {
+	struct device *pdev;        /* Handle to platform_device structure */
+
+	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
+	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
+	int memsize;				/* Memory allocated */
+
+	void __iomem *iobase;	    /* Virtual io base of CE HW  */
+	unsigned int phy_iobase;    /* Physical io base of CE HW    */
+
+	struct clk *ce_core_src_clk;	/* Handle to CE src clk*/
+	struct clk *ce_core_clk;	/* Handle to CE clk */
+	struct clk *ce_clk;		/* Handle to CE clk */
+
+	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
+
+	int assoc_nents;
+	int ivsize;
+	int authsize;
+	int src_nents;
+	int dst_nents;
+
+	dma_addr_t phy_iv_in;
+
+	void *areq;
+	enum qce_cipher_mode_enum mode;
+	struct ce_sps_data ce_sps;
+};
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t  _std_init_vector_sha1[] =   {
+	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+	0x510E527F, 0x9B05688C,	0x1F83D9AB, 0x5BE0CD19
+};
+
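+/*
+ * Pack a byte stream into big-endian 32-bit words; e.g. the bytes
+ * {0x01, 0x02, 0x03, 0x04} become the word 0x01020304. A trailing
+ * partial word of 1-3 bytes is left justified and zero padded.
+ */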
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned n;
+
+	n = len / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     ;
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   ;
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000) ;
+	}
+}
+
+static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned i, j;
+	unsigned char swap_iv[AES_IV_LENGTH];
+
+	memset(swap_iv, 0, AES_IV_LENGTH);
+	for (i = (AES_IV_LENGTH-len), j = len-1;  i < AES_IV_LENGTH; i++, j--)
+		swap_iv[i] = b[j];
+	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
+}
+
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+	return i;
+}
+
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int rev;
+	unsigned int maj_rev, min_rev, step_rev;
+
+	rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
+	mb();
+	maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
+	min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
+	step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
+
+	if ((maj_rev != 0x05) || (min_rev > 0x02) || (step_rev > 0x02)) {
+		pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
+			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+		return -EIO;
+	}
+	if ((min_rev > 0)  && (step_rev != 0)) {
+		pr_err("Unknown Qualcomm crypto device at 0x%x, rev %d.%d.%d\n",
+			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+		return -EIO;
+	}
+	pce_dev->ce_sps.minor_version = min_rev;
+
+	dev_info(pce_dev->pdev, "Qualcomm Crypto %d.%d.%d device found @0x%x\n",
+			maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
+
+	pce_dev->ce_sps.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
+
+	dev_info(pce_dev->pdev,
+			"IO base, CE = 0x%x\n"
+			"Consumer (IN) PIPE %d,    "
+			"Producer (OUT) PIPE %d\n"
+			"IO base BAM = 0x%x\n"
+			"BAM IRQ %d\n",
+			(uint32_t) pce_dev->iobase,
+			pce_dev->ce_sps.dest_pipe_index,
+			pce_dev->ce_sps.src_pipe_index,
+			(uint32_t)pce_dev->ce_sps.bam_iobase,
+			pce_dev->ce_sps.bam_irq);
+	return 0;
+};
+
+static int _ce_get_hash_cmdlistinfo(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq,
+				struct qce_cmdlist_info **cmdplistinfo)
+{
+	struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		*cmdplistinfo = &cmdlistptr->auth_sha1;
+		break;
+
+	case QCE_HASH_SHA256:
+		*cmdplistinfo = &cmdlistptr->auth_sha256;
+		break;
+
+	case QCE_HASH_SHA1_HMAC:
+		*cmdplistinfo = &cmdlistptr->auth_sha1_hmac;
+		break;
+
+	case QCE_HASH_SHA256_HMAC:
+		*cmdplistinfo = &cmdlistptr->auth_sha256_hmac;
+		break;
+
+	case QCE_HASH_AES_CMAC:
+		if (sreq->authklen == AES128_KEY_SIZE)
+			*cmdplistinfo = &cmdlistptr->auth_aes_128_cmac;
+		else
+			*cmdplistinfo = &cmdlistptr->auth_aes_256_cmac;
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
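+/*
+ * Patch the pre-built hash command list for this request: select the HW
+ * key or program the HMAC/CMAC key, load the standard or saved digest as
+ * the auth IV, and set the byte counts, first/last flags and segment
+ * sizes.
+ */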
+static int _ce_setup_hash(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq,
+				struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	bool sha1 = false;
+	struct sps_command_element *pce = NULL;
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
+			(sreq->alg ==  QCE_HASH_AES_CMAC)) {
+		uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+
+		_byte_stream_to_net_words(mackey32, sreq->authkey,
+						sreq->authklen);
+
+		/* check for null key. If null, use hw key*/
+		for (i = 0; i < authk_size_in_word; i++) {
+			if (mackey32[i] != 0)
+				break;
+		}
+
+		pce = cmdlistinfo->go_proc;
+		if (i == authk_size_in_word) {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
+							pce_dev->phy_iobase);
+		} else {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+							pce_dev->phy_iobase);
+			pce = cmdlistinfo->auth_key;
+			for (i = 0; i < authk_size_in_word; i++, pce++)
+				pce->data = mackey32[i];
+		}
+	}
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC)
+		goto go_proc;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+	case QCE_HASH_SHA1_HMAC:
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+	case QCE_HASH_SHA256_HMAC:
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < 5; i++, pce++)
+		pce->data = auth32[i];
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		for (i = 5; i < 8; i++, pce++)
+			pce->data = auth32[i];
+	}
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	pce = cmdlistinfo->auth_bytecount;
+	for (i = 0; i < 2; i++, pce++)
+		pce->data = sreq->auth_data[i];
+
+	/* Set/reset last bit in CFG register */
+	pce = cmdlistinfo->auth_seg_cfg;
+	if (sreq->last_blk)
+		pce->data |= 1 << CRYPTO_LAST;
+	else
+		pce->data &= ~(1 << CRYPTO_LAST);
+	if (sreq->first_blk)
+		pce->data |= 1 << CRYPTO_FIRST;
+	else
+		pce->data &= ~(1 << CRYPTO_FIRST);
+go_proc:
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = sreq->size;
+
+	/* write auth seg start */
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size */
+	pce = cmdlistinfo->seg_size;
+	pce->data = sreq->size;
+
+	return 0;
+}
+
+static int _ce_get_cipher_cmdlistinfo(struct qce_device *pce_dev,
+				struct qce_req *creq,
+				struct qce_cmdlist_info **cmdlistinfo)
+{
+	struct qce_cmdlistptr_ops *cmdlistptr = &pce_dev->ce_sps.cmdlistptr;
+
+	if (creq->alg != CIPHER_ALG_AES) {
+		switch (creq->alg) {
+		case CIPHER_ALG_DES:
+			if (creq->mode ==  QCE_MODE_ECB)
+				*cmdlistinfo = &cmdlistptr->cipher_des_ecb;
+			else
+				*cmdlistinfo = &cmdlistptr->cipher_des_cbc;
+			break;
+
+		case CIPHER_ALG_3DES:
+			if (creq->mode ==  QCE_MODE_ECB)
+				*cmdlistinfo =
+					&cmdlistptr->cipher_3des_ecb;
+			else
+				*cmdlistinfo =
+					&cmdlistptr->cipher_3des_cbc;
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (creq->mode) {
+		case QCE_MODE_ECB:
+			if (creq->encklen ==  AES128_KEY_SIZE)
+				*cmdlistinfo = &cmdlistptr->cipher_aes_128_ecb;
+			else
+				*cmdlistinfo = &cmdlistptr->cipher_aes_256_ecb;
+			break;
+
+		case QCE_MODE_CBC:
+		case QCE_MODE_CTR:
+			if (creq->encklen ==  AES128_KEY_SIZE)
+				*cmdlistinfo =
+					&cmdlistptr->cipher_aes_128_cbc_ctr;
+			else
+				*cmdlistinfo =
+					&cmdlistptr->cipher_aes_256_cbc_ctr;
+			break;
+
+		case QCE_MODE_XTS:
+			if (creq->encklen ==  AES128_KEY_SIZE)
+				*cmdlistinfo = &cmdlistptr->cipher_aes_128_xts;
+			else
+				*cmdlistinfo = &cmdlistptr->cipher_aes_256_xts;
+			break;
+
+		case QCE_MODE_CCM:
+			if (creq->encklen ==  AES128_KEY_SIZE)
+				*cmdlistinfo = &cmdlistptr->aead_aes_128_ccm;
+			else
+				*cmdlistinfo = &cmdlistptr->aead_aes_256_ccm;
+			break;
+
+		default:
+			break;
+		}
+	}
+	return 0;
+}
+
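+/*
+ * Patch the pre-built cipher command list for this request: select the
+ * HW key or program the cipher (and XTS) key, load the IV/counter, set
+ * up CCM authentication when requested, and fill in the direction and
+ * the segment configuration, start and sizes.
+ */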
+static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
+		uint32_t totallen_in, uint32_t coffset,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t key_size;
+	bool use_hw_key = false;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+	int i;
+	struct sps_command_element *pce = NULL;
+
+	if (creq->mode == QCE_MODE_XTS)
+		key_size = creq->encklen/2;
+	else
+		key_size = creq->encklen;
+
+	_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+
+	/* check for null key. If null, use hw key*/
+	enck_size_in_word = key_size/sizeof(uint32_t);
+	for (i = 0; i < enck_size_in_word; i++) {
+		if (enckey32[i] != 0)
+			break;
+	}
+	pce = cmdlistinfo->go_proc;
+	if (i == enck_size_in_word) {
+		use_hw_key = true;
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_OEM_KEY_REG +
+						pce_dev->phy_iobase);
+	} else {
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+						pce_dev->phy_iobase);
+	}
+
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		pce = cmdlistinfo->auth_nonce_info;
+		for (i = 0; i < noncelen32; i++, pce++)
+			pce->data = nonce32[i];
+
+		/* TBD new feature: partial AES CCM pkt support - set last bit */
+		auth_cfg |= ((1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST));
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+		auth_cfg |= (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE);
+		if (creq->authklen ==  AES128_KEY_SIZE)
+			auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 <<
+						CRYPTO_AUTH_KEY_SIZE);
+		else {
+			if (creq->authklen ==  AES256_KEY_SIZE)
+				auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 <<
+							CRYPTO_AUTH_KEY_SIZE);
+		}
+		auth_cfg |= (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG);
+		auth_cfg |= ((MAX_NONCE/sizeof(uint32_t)) <<
+						CRYPTO_AUTH_NONCE_NUM_WORDS);
+
+		if (use_hw_key == true)	{
+			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+		} else {
+			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+			/* write auth key */
+			pce = cmdlistinfo->auth_key;
+			for (i = 0; i < authklen32; i++, pce++)
+				pce->data = enckey32[i];
+		}
+
+		pce = cmdlistinfo->auth_seg_cfg;
+		pce->data = auth_cfg;
+
+		pce = cmdlistinfo->auth_seg_size;
+		pce->data = totallen_in;
+		pce = cmdlistinfo->auth_seg_start;
+		pce->data = 0;
+	}
+
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		encr_cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+		break;
+	case QCE_MODE_CBC:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+		break;
+	case QCE_MODE_XTS:
+		encr_cfg |= (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+		break;
+	case QCE_MODE_CCM:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE);
+		break;
+	case QCE_MODE_CTR:
+	default:
+		encr_cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+		break;
+	}
+	pce_dev->mode = creq->mode;
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (use_hw_key == false) {
+			pce = cmdlistinfo->encr_key;
+			pce->data = enckey32[0];
+			pce++;
+			pce->data = enckey32[1];
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (use_hw_key == false) {
+			/* write encr key */
+			pce = cmdlistinfo->encr_key;
+			for (i = 0; i < 6; i++, pce++)
+				pce->data = enckey32[i];
+		}
+		break;
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			_byte_stream_to_net_words(xtskey32, (creq->enckey +
+					creq->encklen/2), creq->encklen/2);
+			/* write xts encr key */
+			pce = cmdlistinfo->encr_xts_key;
+			for (i = 0; i < xtsklen; i++, pce++)
+				pce->data = xtskey32[i];
+
+			/* write xts du size */
+			pce = cmdlistinfo->encr_xts_du_size;
+			pce->data = creq->cryptlen;
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+			/* write encr cntr iv */
+			pce = cmdlistinfo->encr_cntr_iv;
+			for (i = 0; i < 4; i++, pce++)
+				pce->data = enciv32[i];
+
+			if (creq->mode ==  QCE_MODE_CCM) {
+				/* write cntr iv for ccm */
+				pce = cmdlistinfo->encr_ccm_cntr_iv;
+				for (i = 0; i < 4; i++, pce++)
+					pce->data = enciv32[i];
+				/* update cntr_iv[3] by one */
+				pce = cmdlistinfo->encr_cntr_iv;
+				pce += 3;
+				pce->data += 1;
+			}
+		}
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+						CRYPTO_ENCR_KEY_SZ);
+			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+		} else {
+			if (use_hw_key == false) {
+				/* write encr key */
+				pce = cmdlistinfo->encr_key;
+				for (i = 0; i < enck_size_in_word; i++, pce++)
+					pce->data = enckey32[i];
+				switch (key_size) {
+				case AES128_KEY_SIZE:
+					encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128
+							 << CRYPTO_ENCR_KEY_SZ);
+					break;
+				case AES256_KEY_SIZE:
+				default:
+					encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES256
+							 << CRYPTO_ENCR_KEY_SZ);
+					break;
+				} /* end of switch (key_size) */
+			}
+			encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG;
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->alg) */
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
+		if (creq->dir == QCE_ENCRYPT)
+			pce->data |= (1 << CRYPTO_ENCODE);
+		else
+			pce->data &= ~(1 << CRYPTO_ENCODE);
+		encr_cfg = pce->data;
+	}  else	{
+		encr_cfg |=
+			((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+	}
+	if (use_hw_key == true)
+		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	else
+		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	pce->data = encr_cfg;
+
+	/* write encr seg size */
+	pce = cmdlistinfo->encr_seg_size;
+	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
+		pce->data = (creq->cryptlen + creq->authsize);
+	else
+		pce->data = creq->cryptlen;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (coffset & 0xffff);
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = totallen_in;
+
+	return 0;
+};
+
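+/*
+ * AEAD completion: unmap the source, destination and associated data,
+ * then hand the computed MAC (with the MAC-failure status for CCM, or
+ * the returned IV for other modes) to the client callback.
+ */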
+static int _aead_complete(struct qce_device *pce_dev)
+{
+	struct aead_request *areq;
+	unsigned char mac[SHA256_DIGEST_SIZE];
+
+	areq = (struct aead_request *) pce_dev->areq;
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+					DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+			DMA_TO_DEVICE);
+	/* check MAC */
+	memcpy(mac, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+	if (pce_dev->mode == QCE_MODE_CCM) {
+		uint32_t result_status;
+		result_status = pce_dev->ce_sps.result->status;
+		result_status &= (1 << CRYPTO_MAC_FAILED);
+		result_status |= (pce_dev->ce_sps.consumer_status |
+					pce_dev->ce_sps.producer_status);
+		pce_dev->qce_cb(areq, mac, NULL, result_status);
+	} else {
+		uint32_t ivsize = 0;
+		struct crypto_aead *aead;
+		unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+
+		aead = crypto_aead_reqtfm(areq);
+		ivsize = crypto_aead_ivsize(aead);
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+				ivsize, DMA_TO_DEVICE);
+		memcpy(iv, (char *)(pce_dev->ce_sps.result->encr_cntr_iv),
+			sizeof(iv));
+		pce_dev->qce_cb(areq, mac, iv, pce_dev->ce_sps.consumer_status |
+			pce_dev->ce_sps.producer_status);
+
+	}
+	return 0;
+};
+
+static void _sha_complete(struct qce_device *pce_dev)
+{
+	struct ahash_request *areq;
+	unsigned char digest[SHA256_DIGEST_SIZE];
+
+	areq = (struct ahash_request *) pce_dev->areq;
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				DMA_TO_DEVICE);
+	memcpy(digest, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+	pce_dev->qce_cb(areq, digest,
+			(char *)pce_dev->ce_sps.result->auth_byte_count,
+				pce_dev->ce_sps.consumer_status);
+};
+
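+/*
+ * Cipher completion: unmap the request buffers and return the updated
+ * IV/counter to the client. On minor version 0 hardware the IV/counter
+ * is reconstructed in software for CBC/CTR/XTS instead of being read
+ * from the result dump.
+ */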
+static int _ablk_cipher_complete(struct qce_device *pce_dev)
+{
+	struct ablkcipher_request *areq;
+	unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+
+	areq = (struct ablkcipher_request *) pce_dev->areq;
+
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst,
+			pce_dev->dst_nents, DMA_FROM_DEVICE);
+	}
+	dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		pce_dev->qce_cb(areq, NULL, NULL,
+					pce_dev->ce_sps.consumer_status |
+					pce_dev->ce_sps.producer_status);
+	} else {
+		if (pce_dev->ce_sps.minor_version == 0) {
+			if (pce_dev->mode == QCE_MODE_CBC)
+				memcpy(iv, (char *)sg_virt(areq->src),
+							sizeof(iv));
+
+			if ((pce_dev->mode == QCE_MODE_CTR) ||
+				(pce_dev->mode == QCE_MODE_XTS)) {
+				uint32_t num_blk = 0;
+				uint32_t cntr_iv = 0;
+
+				memcpy(iv, areq->info, sizeof(iv));
+				if (pce_dev->mode == QCE_MODE_CTR)
+					num_blk = areq->nbytes/16;
+				cntr_iv = (u32)(((u32)(*(iv + 14))) << 8) |
+							(u32)(*(iv + 15));
+				*(iv + 14) = (char)((cntr_iv + num_blk) >> 8);
+				*(iv + 15) = (char)((cntr_iv + num_blk) & 0xFF);
+			}
+		} else {
+			memcpy(iv,
+				(char *)(pce_dev->ce_sps.result->encr_cntr_iv),
+				sizeof(iv));
+		}
+		pce_dev->qce_cb(areq, NULL, iv,
+			pce_dev->ce_sps.consumer_status |
+			pce_dev->ce_sps.producer_status);
+	}
+	return 0;
+};
+
+#ifdef QCE_DEBUG
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
+{
+	int i, j, ents;
+	struct sps_iovec *iovec = pce_dev->ce_sps.in_transfer.iovec;
+	uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_NWD;
+
+	printk(KERN_INFO "==============================================\n");
+	printk(KERN_INFO "CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
+	printk(KERN_INFO "==============================================\n");
+	for (i = 0; i <  pce_dev->ce_sps.in_transfer.iovec_count; i++) {
+		printk(KERN_INFO " [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+					iovec->addr, iovec->size, iovec->flags);
+		if (iovec->flags & cmd_flags) {
+			struct sps_command_element *pced;
+
+			pced = (struct sps_command_element *)
+					(GET_VIRT_ADDR(iovec->addr));
+			ents = iovec->size/(sizeof(struct sps_command_element));
+			for (j = 0; j < ents; j++) {
+				printk(KERN_INFO "      [%d] [0x%x] 0x%x\n", j,
+					pced->addr, pced->data);
+				pced++;
+			}
+		}
+		iovec++;
+	}
+
+	printk(KERN_INFO "==============================================\n");
+	printk(KERN_INFO "PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
+	printk(KERN_INFO "==============================================\n");
+	iovec = pce_dev->ce_sps.out_transfer.iovec;
+	for (i = 0; i <  pce_dev->ce_sps.out_transfer.iovec_count; i++) {
+		printk(KERN_INFO " [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+				iovec->addr, iovec->size, iovec->flags);
+		iovec++;
+	}
+}
+
+#else
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev)
+{
+}
+#endif
+
+static void _qce_sps_iovec_count_init(struct qce_device *pce_dev)
+{
+	pce_dev->ce_sps.in_transfer.iovec_count = 0;
+	pce_dev->ce_sps.out_transfer.iovec_count = 0;
+}
+
+static void _qce_set_eot_flag(struct sps_transfer *sps_bam_pipe)
+{
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					(sps_bam_pipe->iovec_count - 1);
+	iovec->flags |= SPS_IOVEC_FLAG_EOT;
+}
+
+static void _qce_sps_add_data(uint32_t addr, uint32_t len,
+		struct sps_transfer *sps_bam_pipe)
+{
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+	if (len) {
+		iovec->size = len;
+		iovec->addr = addr;
+		iovec->flags = 0;
+		sps_bam_pipe->iovec_count++;
+	}
+}
+
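+/*
+ * Queue a DMA-mapped scatterlist on a BAM pipe: each entry is split into
+ * iovecs of at most SPS_MAX_PKT_SIZE bytes, and on minor version 0
+ * hardware the per-entry length is rounded up to the CE burst size.
+ */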
+static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
+		struct scatterlist *sg_src, uint32_t nbytes,
+		struct sps_transfer *sps_bam_pipe)
+{
+	uint32_t addr, data_cnt, len;
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+						sps_bam_pipe->iovec_count;
+
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg_src));
+		nbytes -= len;
+		addr = sg_dma_address(sg_src);
+		if (pce_dev->ce_sps.minor_version == 0)
+			len = ALIGN(len, pce_dev->ce_sps.ce_burst_size);
+		while (len > 0) {
+			if (len > SPS_MAX_PKT_SIZE) {
+				data_cnt = SPS_MAX_PKT_SIZE;
+				iovec->size = data_cnt;
+				iovec->addr = addr;
+				iovec->flags = 0;
+			} else {
+				data_cnt = len;
+				iovec->size = data_cnt;
+				iovec->addr = addr;
+				iovec->flags = 0;
+			}
+			iovec++;
+			sps_bam_pipe->iovec_count++;
+			addr += data_cnt;
+			len -= data_cnt;
+		}
+		sg_src++;
+	}
+	return 0;
+}
+
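+/*
+ * Queue one pre-built command list on a BAM pipe: the iovec points at
+ * the physical address of the register-write list and carries the CMD
+ * and NWD flags plus any caller-supplied flag.
+ */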
+static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
+				struct qce_cmdlist_info *cmdptr,
+				struct sps_transfer *sps_bam_pipe)
+{
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+	iovec->size = cmdptr->size;
+	iovec->addr = GET_PHYS_ADDR(cmdptr->cmdlist);
+	iovec->flags = SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_NWD | flag;
+	sps_bam_pipe->iovec_count++;
+
+	return 0;
+}
+
+static int _qce_sps_transfer(struct qce_device *pce_dev)
+{
+	int rc = 0;
+
+	_qce_dump_descr_fifos(pce_dev);
+	rc = sps_transfer(pce_dev->ce_sps.consumer.pipe,
+					  &pce_dev->ce_sps.in_transfer);
+	if (rc) {
+		pr_err("sps_xfr() fail (consumer pipe=0x%x) rc = %d\n",
+				(u32)pce_dev->ce_sps.consumer.pipe, rc);
+		return rc;
+	}
+	rc = sps_transfer(pce_dev->ce_sps.producer.pipe,
+					  &pce_dev->ce_sps.out_transfer);
+	if (rc) {
+		pr_err("sps_xfr() fail (producer pipe=0x%x) rc = %d\n",
+				(u32)pce_dev->ce_sps.producer.pipe, rc);
+		return rc;
+	}
+	return rc;
+}
+
+/**
+ * Allocate and Connect a CE peripheral's SPS endpoint
+ *
+ * This function allocates an endpoint context and
+ * connects it with a memory endpoint by calling the
+ * appropriate SPS driver APIs.
+ *
+ * It also registers an SPS callback function with
+ * the SPS driver.
+ *
+ * This function should only be called once typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ * @is_producer - 1 means Producer endpoint
+ *		  0 means Consumer endpoint
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep,
+				bool is_producer)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_info;
+	struct sps_connect *sps_connect_info = &ep->connect;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Allocate endpoint context */
+	sps_pipe_info = sps_alloc_endpoint();
+	if (!sps_pipe_info) {
+		pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
+			   is_producer);
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* Now save the sps pipe handle */
+	ep->pipe = sps_pipe_info;
+
+	/* Get default connection configuration for an endpoint */
+	rc = sps_get_config(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_get_config() fail pipe_handle=0x%x, rc = %d\n",
+			(u32)sps_pipe_info, rc);
+		goto get_config_err;
+	}
+
+	/* Modify the default connection configuration */
+	if (is_producer) {
+		/*
+		 * For CE producer transfer, the source should be the
+		 * CE peripheral whereas the destination should be
+		 * system memory.
+		 */
+		sps_connect_info->source = pce_dev->ce_sps.bam_handle;
+		sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
+		/* Producer pipe will handle this connection */
+		sps_connect_info->mode = SPS_MODE_SRC;
+		sps_connect_info->options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT;
+	} else {
+		/*
+		 * For CE consumer transfer, the source should be
+		 * system memory whereas the destination should be
+		 * the CE peripheral.
+		 */
+		sps_connect_info->source = SPS_DEV_HANDLE_MEM;
+		sps_connect_info->destination = pce_dev->ce_sps.bam_handle;
+		sps_connect_info->mode = SPS_MODE_DEST;
+		sps_connect_info->options =
+			SPS_O_AUTO_ENABLE | SPS_O_EOT;
+	}
+
+	/* Producer pipe index */
+	sps_connect_info->src_pipe_index = pce_dev->ce_sps.src_pipe_index;
+	/* Consumer pipe index */
+	sps_connect_info->dest_pipe_index = pce_dev->ce_sps.dest_pipe_index;
+	sps_connect_info->event_thresh = 0x10;
+	/*
+	 * Max. number of scatter/gather buffers that can
+	 * be passed by the block layer = 32 (NR_SG).
+	 * Each BAM descriptor needs 64 bits (8 bytes), and one
+	 * descriptor is required per buffer transfer, so a total of
+	 * 256 (32 * 8) bytes of descriptor FIFO is needed.
+	 * Due to a HW limitation, at least one extra descriptor's worth
+	 * of memory (256 bytes + 8 bytes) must be allocated; to keep the
+	 * size a power of 2, 512 bytes are allocated.
+	 */
+	sps_connect_info->desc.size = 512;
+	sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
+					sps_connect_info->desc.size,
+					&sps_connect_info->desc.phys_base,
+					GFP_KERNEL);
+	if (sps_connect_info->desc.base == NULL) {
+		rc = -ENOMEM;
+		pr_err("Can not allocate coherent memory for sps data\n");
+		goto get_config_err;
+	}
+
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+
+	/* Establish connection between peripheral and memory endpoint */
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe_handle=0x%x, rc = %d\n",
+			(u32)sps_pipe_info, rc);
+		goto sps_connect_err;
+	}
+
+	sps_event->mode = SPS_TRIGGER_CALLBACK;
+	sps_event->options = SPS_O_EOT;
+	sps_event->xfer_done = NULL;
+	sps_event->user = (void *)pce_dev;
+
+	pr_debug("success, %s : pipe_handle=0x%x, desc fifo base (phy) = 0x%x\n",
+		is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
+		(u32)sps_pipe_info, sps_connect_info->desc.phys_base);
+	goto out;
+
+sps_connect_err:
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+get_config_err:
+	sps_free_endpoint(sps_pipe_info);
+out:
+	return rc;
+}
+
+/**
+ * Disconnect and Deallocate a CE peripheral's SPS endpoint
+ *
+ * This function disconnects the endpoint and deallocates
+ * the endpoint context.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ *
+ */
+static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep)
+{
+	struct sps_pipe *sps_pipe_info = ep->pipe;
+	struct sps_connect *sps_connect_info = &ep->connect;
+
+	sps_disconnect(sps_pipe_info);
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+	sps_free_endpoint(sps_pipe_info);
+}
+/**
+ * Initialize SPS HW connected with CE core
+ *
+ * This function registers BAM HW resources with the
+ * SPS driver and then initializes 2 SPS endpoints.
+ *
+ * This function should only be called once typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init(struct qce_device *pce_dev)
+{
+	int rc = 0;
+	struct sps_bam_props bam = {0};
+	bool register_bam = false;
+
+	bam.phys_addr = pce_dev->ce_sps.bam_mem;
+	bam.virt_addr = pce_dev->ce_sps.bam_iobase;
+
+	/*
+	 * This event threshold value is only significant for BAM-to-BAM
+	 * transfer. It's ignored for BAM-to-System mode transfer.
+	 */
+	bam.event_threshold = 0x10;	/* Pipe event threshold */
+	/*
+	 * This threshold controls when the BAM publishes
+	 * the descriptor size on the sideband interface.
+	 * SPS HW will only be used when the
+	 * data transfer size is > 64 bytes.
+	 */
+	bam.summing_threshold = 64;
+	/* The SPS driver will handle the crypto BAM IRQ */
+	bam.irq = (u32)pce_dev->ce_sps.bam_irq;
+	bam.manage = SPS_BAM_MGR_LOCAL;
+
+	pr_debug("bam physical base=0x%x\n", (u32)bam.phys_addr);
+	pr_debug("bam virtual base=0x%x\n", (u32)bam.virt_addr);
+
+	mutex_lock(&bam_register_cnt);
+	if ((bam_registry.handle == 0) && (bam_registry.cnt == 0)) {
+		/* Register CE Peripheral BAM device to SPS driver */
+		rc = sps_register_bam_device(&bam, &bam_registry.handle);
+		if (rc) {
+			pr_err("sps_register_bam_device() failed! err=%d", rc);
+			mutex_unlock(&bam_register_cnt);
+			return -EIO;
+		}
+		bam_registry.cnt++;
+		register_bam = true;
+	} else {
+		bam_registry.cnt++;
+	}
+	mutex_unlock(&bam_register_cnt);
+	pce_dev->ce_sps.bam_handle =  bam_registry.handle;
+	pr_debug("BAM device registered. bam_handle=0x%x",
+		pce_dev->ce_sps.bam_handle);
+
+	rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.producer, true);
+	if (rc)
+		goto sps_connect_producer_err;
+	rc = qce_sps_init_ep_conn(pce_dev, &pce_dev->ce_sps.consumer, false);
+	if (rc)
+		goto sps_connect_consumer_err;
+
+	pce_dev->ce_sps.out_transfer.user = pce_dev->ce_sps.producer.pipe;
+	pce_dev->ce_sps.in_transfer.user = pce_dev->ce_sps.consumer.pipe;
+	pr_info(" Qualcomm MSM CE-BAM at 0x%016llx irq %d\n",
+		(unsigned long long)pce_dev->ce_sps.bam_mem,
+		(unsigned int)pce_dev->ce_sps.bam_irq);
+	return rc;
+
+sps_connect_consumer_err:
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
+sps_connect_producer_err:
+	if (register_bam)
+		sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
+
+	return rc;
+}
+
+/**
+ * De-initialize SPS HW connected with CE core
+ *
+ * This function deinitializes the SPS endpoints and then
+ * deregisters BAM resources from the SPS driver.
+ *
+ * This function should only be called once typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ */
+static void qce_sps_exit(struct qce_device *pce_dev)
+{
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.consumer);
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_sps.producer);
+	mutex_lock(&bam_register_cnt);
+	if ((bam_registry.handle != 0) && (bam_registry.cnt == 1)) {
+		sps_deregister_bam_device(pce_dev->ce_sps.bam_handle);
+		bam_registry.cnt = 0;
+		bam_registry.handle = 0;
+	}
+	if ((bam_registry.handle != 0) && (bam_registry.cnt > 1))
+		bam_registry.cnt--;
+	mutex_unlock(&bam_register_cnt);
+
+	iounmap(pce_dev->ce_sps.bam_iobase);
+}
+
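+/*
+ * SPS pipe callbacks. A request is complete only after both the consumer
+ * (input) and producer (output) pipes have signalled; each callback marks
+ * its own pipe as completed and invokes the completion handler once the
+ * other pipe has already reached the completed state.
+ */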
+static void _aead_sps_producer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+};
+
+static void _aead_sps_consumer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_aead_complete(pce_dev);
+	}
+};
+
+static void _sha_sps_producer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_sha_complete(pce_dev);
+	}
+};
+
+static void _sha_sps_consumer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_sha_complete(pce_dev);
+	}
+};
+
+static void _ablk_cipher_sps_producer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.consumer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+};
+
+static void _ablk_cipher_sps_consumer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)
+		((struct sps_event_notify *)notify)->user;
+
+	pce_dev->ce_sps.notify = *notify;
+	pr_debug("sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
+			notify->event_id,
+			notify->data.transfer.iovec.addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags);
+
+	pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_COMP;
+	if (pce_dev->ce_sps.producer_state == QCE_PIPE_STATE_COMP) {
+		pce_dev->ce_sps.consumer_state = QCE_PIPE_STATE_IDLE;
+		pce_dev->ce_sps.producer_state = QCE_PIPE_STATE_IDLE;
+
+		/* done */
+		_ablk_cipher_complete(pce_dev);
+	}
+};
+
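+/*
+ * Append one register write to a command list: addr is an offset from
+ * the CE register base, the full 32-bit mask is used, and when populate
+ * is non-NULL the element pointer is returned so that per-request code
+ * can later patch the programmed value.
+ */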
+static void qce_add_cmd_element(struct qce_device *pdev,
+			struct sps_command_element **cmd_ptr, u32 addr,
+			u32 data, struct sps_command_element **populate)
+{
+	(*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
+	(*cmd_ptr)->data = data;
+	(*cmd_ptr)->mask = 0xFFFFFFFF;
+	if (populate != NULL)
+		*populate = *cmd_ptr;
+	(*cmd_ptr)++;
+}
+
+static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev,
+		unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t xts_key_reg = 0;
+	uint32_t iv_reg = 0;
+	uint32_t crypto_cfg = 0;
+	uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+	uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+	crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+			BIT(CRYPTO_MASK_DOUT_INTR) |
+			BIT(CRYPTO_MASK_DIN_INTR) |
+			BIT(CRYPTO_MASK_OP_DONE_INTR) |
+			(0 << CRYPTO_HIGH_SPD_EN_N) |
+			(pipe_pair << CRYPTO_PIPE_SET_SELECT);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to AES cipher operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	switch (mode) {
+	case QCE_MODE_CBC:
+	case QCE_MODE_CTR:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+						CRYPTO_ENCR_ALG);
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
+							CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+							CRYPTO_ENCR_ALG);
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+	break;
+	case QCE_MODE_ECB:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_ecb.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_ECB <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 0;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_ecb.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
+							CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+							CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_ECB <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 0;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+	break;
+	case QCE_MODE_XTS:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_xts.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_xts);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_XTS <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 4;
+		} else {
+			cmdlistptr->cipher_aes_256_xts.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_xts);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 <<
+							CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_AES <<
+							CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_XTS <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 8;
+		}
+	break;
+	default:
+	break;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, crypto_cfg,
+						&pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+				(uint32_t)0xffffffff, &pcl_info->encr_mask);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (xts_key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
+					0, &pcl_info->encr_xts_key);
+		for (i = 1; i < xts_key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_ENCR_XTS_DU_SIZE_REG, 0, NULL);
+	}
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	}
+	/* Add dummy to  align size to burst-size multiple */
+	if (mode == QCE_MODE_XTS) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+						0, &pcl_info->auth_seg_size);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, &pcl_info->auth_seg_size);
+
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+			&pcl_info->go_proc);
+
+	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev,
+		unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
+		bool mode_cbc)
+{
+
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t iv_reg = 0;
+	uint32_t crypto_cfg = 0;
+	uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+	uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+	crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+			BIT(CRYPTO_MASK_DOUT_INTR) |
+			BIT(CRYPTO_MASK_DIN_INTR) |
+			BIT(CRYPTO_MASK_OP_DONE_INTR) |
+			(0 << CRYPTO_HIGH_SPD_EN_N) |
+			(pipe_pair << CRYPTO_PIPE_SET_SELECT);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to cipher operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case CIPHER_ALG_DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_des_cbc.cmdlist =
+						(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_cbc);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_DES <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_DES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_CBC <<
+							CRYPTO_ENCR_MODE);
+			iv_reg = 2;
+			key_reg = 2;
+		} else {
+			cmdlistptr->cipher_des_ecb.cmdlist =
+						(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_ecb);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_DES <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_DES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_ECB <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 0;
+			key_reg = 2;
+		}
+	break;
+	case CIPHER_ALG_3DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_3des_cbc.cmdlist =
+						(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_cbc);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_3DES <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_DES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_CBC <<
+							CRYPTO_ENCR_MODE);
+			iv_reg = 2;
+			key_reg = 6;
+		} else {
+			cmdlistptr->cipher_3des_ecb.cmdlist =
+						(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_ecb);
+
+			encr_cfg = (CRYPTO_ENCR_KEY_SZ_3DES <<
+						CRYPTO_ENCR_KEY_SZ) |
+					(CRYPTO_ENCR_ALG_DES <<
+						CRYPTO_ENCR_ALG) |
+					(CRYPTO_ENCR_MODE_ECB <<
+						CRYPTO_ENCR_MODE);
+			iv_reg = 0;
+			key_reg = 6;
+		}
+	break;
+	default:
+	break;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG, crypto_cfg,
+						&pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+								NULL);
+		/* Add 2 dummy to  align size to burst-size multiple */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR2_IV2_REG, 0,
+								NULL);
+	}
+	/* Add dummy to  align size to burst-size multiple */
+	if (!mode_cbc) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+						0, &pcl_info->auth_seg_size);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, &pcl_info->auth_seg_size);
+
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+			&pcl_info->go_proc);
+
+	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
+		unsigned char **pvaddr, enum qce_hash_alg_enum alg,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr =
+			(struct sps_command_element *)(*pvaddr);
+	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t key_reg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t iv_reg = 0;
+	uint32_t crypto_cfg = 0;
+	uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+	uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+	crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+			BIT(CRYPTO_MASK_DOUT_INTR) |
+			BIT(CRYPTO_MASK_DIN_INTR) |
+			BIT(CRYPTO_MASK_OP_DONE_INTR) |
+			(0 << CRYPTO_HIGH_SPD_EN_N) |
+			(pipe_pair << CRYPTO_PIPE_SET_SELECT);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to authentication operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case QCE_HASH_SHA1:
+		cmdlistptr->auth_sha1.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1);
+
+		auth_cfg = (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		iv_reg = 5;
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+							0, NULL);
+
+	break;
+	case QCE_HASH_SHA256:
+		cmdlistptr->auth_sha256.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256);
+
+		auth_cfg = (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		iv_reg = 8;
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 2 dummy writes */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+								0, NULL);
+	break;
+	case QCE_HASH_SHA1_HMAC:
+		cmdlistptr->auth_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1_hmac);
+
+		auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		key_reg = 16;
+		iv_reg = 5;
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+							0, NULL);
+	break;
+	case QCE_AEAD_SHA1_HMAC:
+		cmdlistptr->aead_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_sha1_hmac);
+
+		auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS) |
+				(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+		key_reg = 16;
+		iv_reg = 5;
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 2 dummy writes */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+							0, NULL);
+	break;
+	case QCE_HASH_SHA256_HMAC:
+		cmdlistptr->auth_sha256_hmac.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256_hmac);
+
+		auth_cfg = (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		key_reg = 16;
+		iv_reg = 8;
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 2 dummy writes */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+								0, NULL);
+	break;
+	case QCE_HASH_AES_CMAC:
+		if (key_128 == true) {
+			cmdlistptr->auth_aes_128_cmac.cmdlist =
+						(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_128_cmac);
+
+			auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+				(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
+							CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_KEY_SZ_AES128 <<
+							CRYPTO_AUTH_KEY_SIZE) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+			key_reg = 4;
+		} else {
+			cmdlistptr->auth_aes_256_cmac.cmdlist =
+							(uint32_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_256_cmac);
+
+			auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST)|
+				(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+				(CRYPTO_AUTH_SIZE_ENUM_16_BYTES <<
+							CRYPTO_AUTH_SIZE) |
+				(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+				(CRYPTO_AUTH_KEY_SZ_AES256 <<
+							CRYPTO_AUTH_KEY_SIZE) |
+				(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+			key_reg = 8;
+		}
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					crypto_cfg, &pcl_info->crypto_cfg);
+		/* 2 dummy writes */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+								0, NULL);
+	break;
+	default:
+	break;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+
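+	/*
+	 * AES-CMAC has no hash IV or byte count to carry over, so all auth
+	 * IV, key and byte count registers are simply cleared here; the SHA
+	 * variants expose the first IV and byte count elements so they can
+	 * be patched per request.
+	 */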
+	if (alg == QCE_HASH_AES_CMAC) {
+		/* reset auth iv, bytecount and key registers */
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, NULL);
+	} else {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+							&pcl_info->auth_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, &pcl_info->auth_bytecount);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	if (key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
+		for (i = 1; i < key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+	}
+	if (alg != QCE_AEAD_SHA1_HMAC)
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+			&pcl_info->go_proc);
+
+	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
+				unsigned char **pvaddr, bool key_128)
+{
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t crypto_cfg = 0;
+	uint32_t beats = (pdev->ce_sps.ce_burst_size >> 3) - 1;
+	uint32_t pipe_pair = pdev->ce_sps.pipe_pair_index;
+
+	crypto_cfg = (beats << CRYPTO_REQ_SIZE) |
+			BIT(CRYPTO_MASK_DOUT_INTR) |
+			BIT(CRYPTO_MASK_DIN_INTR) |
+			BIT(CRYPTO_MASK_OP_DONE_INTR) |
+			(0 << CRYPTO_HIGH_SPD_EN_N) |
+			(pipe_pair << CRYPTO_PIPE_SET_SELECT);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to aead operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	if (key_128 == true) {
+		cmdlistptr->aead_aes_128_ccm.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_128_ccm);
+
+		auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+			(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+			(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+			(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
+		auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+		encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+			(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+			((CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE));
+		key_reg = 4;
+	} else {
+
+		cmdlistptr->aead_aes_256_ccm.cmdlist = (uint32_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_256_ccm);
+
+		auth_cfg = (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+			(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+			(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+			(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
+			((MAX_NONCE/sizeof(uint32_t)) <<
+						CRYPTO_AUTH_NONCE_NUM_WORDS);
+		auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+		encr_cfg = (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+			(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+			((CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE));
+		key_reg = 8;
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+				crypto_cfg, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+									NULL);
+	/* add 1 dummy */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+					encr_cfg, &pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+				(uint32_t)0xffffffff, &pcl_info->encr_mask);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+	/* reset auth iv, bytecount and key  registers */
+	for (i = 0; i < 8; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+					0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
+					0, NULL);
+	for (i = 0; i < 16; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set auth key */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+							&pcl_info->auth_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set NONCE info */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
+						&pcl_info->auth_nonce_info);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_INFO_NONCE0_REG +
+				i * sizeof(uint32_t)), 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
+						&pcl_info->encr_ccm_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
+			0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+			&pcl_info->go_proc);
+
+	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
+		unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr =
+			(struct sps_command_element *)(*pvaddr);
+	uint32_t ce_vaddr_start = (uint32_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+
+	cmdlistptr->unlock_all_pipes.cmdlist = (uint32_t)ce_vaddr;
+	pcl_info = &(cmdlistptr->unlock_all_pipes);
+
+	/*
+	 * Designate chunks of the allocated memory to command list
+	 * to unlock pipes.
+	 */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	pcl_info->size = (uint32_t)ce_vaddr - (uint32_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int qce_setup_cmdlistptrs(struct qce_device *pdev,
+					unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	ce_vaddr =
+		(struct sps_command_element *) ALIGN(((unsigned int) ce_vaddr),
+									16);
+	*pvaddr = (unsigned char *) ce_vaddr;
+
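+	/*
+	 * Pre-build one command list for every supported combination of
+	 * algorithm, mode and key size, so per-request setup only needs to
+	 * patch register values inside these lists.
+	 */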
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, true);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, true);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, true);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, true);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CBC, false);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_CTR, false);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_ECB, false);
+	_setup_cipher_aes_cmdlistptrs(pdev, pvaddr, QCE_MODE_XTS, false);
+
+	_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, true);
+	_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_DES, false);
+	_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, true);
+	_setup_cipher_des_cmdlistptrs(pdev, pvaddr, CIPHER_ALG_3DES, false);
+
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1, false);
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256, false);
+
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA1_HMAC, false);
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_SHA256_HMAC, false);
+
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, true);
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_HASH_AES_CMAC, false);
+
+	_setup_auth_cmdlistptrs(pdev, pvaddr, QCE_AEAD_SHA1_HMAC, false);
+
+	_setup_aead_cmdlistptrs(pdev, pvaddr, true);
+	_setup_aead_cmdlistptrs(pdev, pvaddr, false);
+	_setup_unlock_pipe_cmdlistptrs(pdev, pvaddr);
+
+	return 0;
+}
+
+static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
+{
+	unsigned char *vaddr;
+
+	vaddr = pce_dev->coh_vmem;
+	vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr),  16);
+
+	/* Carve out the SPS iovec tables (cmd and data) for each pipe */
+	pce_dev->ce_sps.in_transfer.iovec = (struct sps_iovec *)vaddr;
+	pce_dev->ce_sps.in_transfer.iovec_phys =
+					(uint32_t)GET_PHYS_ADDR(vaddr);
+	vaddr += MAX_BAM_DESCRIPTORS * 8;
+
+	pce_dev->ce_sps.out_transfer.iovec = (struct sps_iovec *)vaddr;
+	pce_dev->ce_sps.out_transfer.iovec_phys =
+					(uint32_t)GET_PHYS_ADDR(vaddr);
+	vaddr += MAX_BAM_DESCRIPTORS * 8;
+
+	qce_setup_cmdlistptrs(pce_dev, &vaddr);
+	pce_dev->ce_sps.result_dump = (uint32_t)vaddr;
+	pce_dev->ce_sps.result = (struct ce_result_dump_format *)vaddr;
+	vaddr += 128;
+
+	return 0;
+}
+
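+/*
+ * Patch the pre-built SHA1-HMAC command list with the MAC key, the
+ * standard SHA-1 initial vector and the auth segment size/start for
+ * this request.
+ */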
+int qce_aead_sha1_hmac_setup(struct qce_req *creq, struct crypto_aead *aead,
+				struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t authk_size_in_word = creq->authklen/sizeof(uint32_t);
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	struct sps_command_element *pce = NULL;
+	struct aead_request *areq = (struct aead_request *)creq->areq;
+	int i;
+
+	_byte_stream_to_net_words(mackey32, creq->authkey,
+					creq->authklen);
+	pce = cmdlistinfo->auth_key;
+	for (i = 0; i < authk_size_in_word; i++, pce++)
+		pce->data = mackey32[i];
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < 5; i++, pce++)
+		pce->data = _std_init_vector_sha1[i];
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = creq->cryptlen + areq->assoclen + crypto_aead_ivsize(aead);
+
+	/* write auth seg start */
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	return 0;
+}
+
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct aead_request *areq = (struct aead_request *) q_req->areq;
+	uint32_t authsize = q_req->authsize;
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	uint32_t ivsize = 0;
+	uint32_t totallen_in, out_len;
+	uint32_t hw_pad_out = 0;
+	int rc = 0;
+	int ce_burst_size;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	struct qce_cmdlist_info *auth_cmdlistinfo = NULL;
+
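+	/*
+	 * CCM does not map a separate cipher IV buffer; its nonce/counter
+	 * is handled through the cipher command list, so ivsize stays 0.
+	 */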
+	if (q_req->mode != QCE_MODE_CCM)
+		ivsize = crypto_aead_ivsize(aead);
+
+	ce_burst_size = pce_dev->ce_sps.ce_burst_size;
+	if (q_req->dir == QCE_ENCRYPT) {
+		q_req->cryptlen = areq->cryptlen;
+		totallen_in = q_req->cryptlen + areq->assoclen + ivsize;
+		if (q_req->mode == QCE_MODE_CCM) {
+			out_len = areq->cryptlen + authsize;
+			hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
+		} else {
+			out_len = areq->cryptlen;
+		}
+	} else {
+		q_req->cryptlen = areq->cryptlen - authsize;
+		if (q_req->mode == QCE_MODE_CCM)
+			totallen_in = areq->cryptlen + areq->assoclen;
+		else
+			totallen_in = q_req->cryptlen + areq->assoclen + ivsize;
+		out_len = q_req->cryptlen;
+		hw_pad_out = authsize;
+	}
+
+	pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen);
+	pce_dev->src_nents = count_sg(areq->src, areq->cryptlen);
+	pce_dev->ivsize = q_req->ivsize;
+	pce_dev->authsize = q_req->authsize;
+	pce_dev->phy_iv_in = 0;
+
+	/* associated data input */
+	dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+					 DMA_TO_DEVICE);
+	/* cipher input */
+	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher + mac output for encryption */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, out_len);
+		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	} else {
+		pce_dev->dst_nents = pce_dev->src_nents;
+	}
+
+	_ce_get_cipher_cmdlistinfo(pce_dev, q_req, &cmdlistinfo);
+	/* set up crypto device */
+	rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
+				areq->assoclen + ivsize, cmdlistinfo);
+	if (rc < 0)
+		goto bad;
+
+	if (q_req->mode != QCE_MODE_CCM) {
+		rc = qce_aead_sha1_hmac_setup(q_req, aead, auth_cmdlistinfo);
+		if (rc < 0)
+			goto bad;
+		/* overwrite seg size */
+		cmdlistinfo->seg_size->data = totallen_in;
+		/* cipher iv for input */
+		pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv,
+			ivsize, DMA_TO_DEVICE);
+	}
+
+	/* setup for callback, and issue command to bam */
+	pce_dev->areq = q_req->areq;
+	pce_dev->qce_cb = q_req->qce_cb;
+
+	/* Register callback for the EOT (End of Transfer) event. */
+	pce_dev->ce_sps.producer.event.callback = _aead_sps_producer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+					&pce_dev->ce_sps.producer.event);
+	if (rc) {
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	/* Register callback for the EOT (End of Transfer) event. */
+	pce_dev->ce_sps.consumer.event.callback = _aead_sps_consumer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
+					&pce_dev->ce_sps.consumer.event);
+	if (rc) {
+		pr_err("Consumer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	_qce_sps_iovec_count_init(pce_dev);
+
+	_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+					&pce_dev->ce_sps.in_transfer);
+
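+	/*
+	 * Minor version 0 hardware works on a single aligned buffer that
+	 * holds assoc data, IV and payload, so one src/dst descriptor pair
+	 * covers the whole request.  Newer versions take assoc data, IV and
+	 * payload as separate BAM descriptors and route the pass-through
+	 * bytes to the ignore buffer on the output side.
+	 */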
+	if (pce_dev->ce_sps.minor_version == 0) {
+		_qce_sps_add_sg_data(pce_dev, areq->src, totallen_in,
+					&pce_dev->ce_sps.in_transfer);
+
+		_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+		_qce_sps_add_sg_data(pce_dev, areq->dst, out_len +
+					areq->assoclen + hw_pad_out,
+				&pce_dev->ce_sps.out_transfer);
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					&pce_dev->ce_sps.out_transfer);
+	} else {
+		_qce_sps_add_sg_data(pce_dev, areq->assoc, areq->assoclen,
+					 &pce_dev->ce_sps.in_transfer);
+		_qce_sps_add_data((uint32_t)pce_dev->phy_iv_in, ivsize,
+					&pce_dev->ce_sps.in_transfer);
+		_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
+					&pce_dev->ce_sps.in_transfer);
+		_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+
+		/* Pass through to ignore associated (+iv, if applicable) data */
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
+				(ivsize + areq->assoclen),
+				&pce_dev->ce_sps.out_transfer);
+		_qce_sps_add_sg_data(pce_dev, areq->dst, out_len,
+					&pce_dev->ce_sps.out_transfer);
+		/* Pass through to ignore hw_pad (padding of the MAC data) */
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.ignore_buffer),
+				hw_pad_out, &pce_dev->ce_sps.out_transfer);
+
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+			CRYPTO_RESULT_DUMP_SIZE, &pce_dev->ce_sps.out_transfer);
+	}
+	rc = _qce_sps_transfer(pce_dev);
+	if (rc)
+		goto bad;
+	return 0;
+
+bad:
+	if (pce_dev->assoc_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents,
+				DMA_TO_DEVICE);
+	}
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	}
+	if (areq->src != areq->dst) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	if (pce_dev->phy_iv_in) {
+		dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in,
+				ivsize, DMA_TO_DEVICE);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)
+						c_req->areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+
+	pce_dev->src_nents = 0;
+	pce_dev->dst_nents = 0;
+	_ce_get_cipher_cmdlistinfo(pce_dev, c_req, &cmdlistinfo);
+
+	/* cipher input */
+	pce_dev->src_nents = count_sg(areq->src, areq->nbytes);
+
+	dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher output */
+	if (areq->src != areq->dst) {
+		pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes);
+		dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents,
+							DMA_FROM_DEVICE);
+	} else {
+		pce_dev->dst_nents = pce_dev->src_nents;
+	}
+	/* set up crypto device */
+	rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0, cmdlistinfo);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for client callback, and issue command to BAM */
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = c_req->qce_cb;
+
+	/* Register callback for the EOT (End of Transfer) event. */
+	pce_dev->ce_sps.producer.event.callback =
+				_ablk_cipher_sps_producer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+					&pce_dev->ce_sps.producer.event);
+	if (rc) {
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	/* Register callback for the EOT (End of Transfer) event. */
+	pce_dev->ce_sps.consumer.event.callback =
+			_ablk_cipher_sps_consumer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
+					&pce_dev->ce_sps.consumer.event);
+	if (rc) {
+		pr_err("Consumer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	_qce_sps_iovec_count_init(pce_dev);
+
+	_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+					&pce_dev->ce_sps.in_transfer);
+	_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+					&pce_dev->ce_sps.in_transfer);
+	_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+
+	_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
+					&pce_dev->ce_sps.out_transfer);
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_dev->ce_sps.out_transfer);
+	rc = _qce_sps_transfer(pce_dev);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (pce_dev->dst_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->dst,
+		pce_dev->dst_nents, DMA_FROM_DEVICE);
+	}
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, areq->src,
+				pce_dev->src_nents,
+				(areq->src == areq->dst) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+
+	struct ahash_request *areq = (struct ahash_request *)sreq->areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+
+	pce_dev->src_nents = count_sg(sreq->src, sreq->size);
+	_ce_get_hash_cmdlistinfo(pce_dev, sreq, &cmdlistinfo);
+	dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents,
+							DMA_TO_DEVICE);
+	rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
+	if (rc < 0)
+		goto bad;
+
+	pce_dev->areq = areq;
+	pce_dev->qce_cb = sreq->qce_cb;
+
+	/* Register callback for the EOT (End of Transfer) event. */
+	pce_dev->ce_sps.producer.event.callback = _sha_sps_producer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.producer.pipe,
+					&pce_dev->ce_sps.producer.event);
+	if (rc) {
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	/* Register callback for the EOT (End of Transfer) event. */
+	pce_dev->ce_sps.consumer.event.callback = _sha_sps_consumer_callback;
+	rc = sps_register_event(pce_dev->ce_sps.consumer.pipe,
+					&pce_dev->ce_sps.consumer.event);
+	if (rc) {
+		pr_err("Consumer callback registration failed rc = %d\n", rc);
+		goto bad;
+	}
+
+	_qce_sps_iovec_count_init(pce_dev);
+
+	_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+					&pce_dev->ce_sps.in_transfer);
+	_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+						 &pce_dev->ce_sps.in_transfer);
+	_qce_set_eot_flag(&pce_dev->ce_sps.in_transfer);
+
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_dev->ce_sps.result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_dev->ce_sps.out_transfer);
+	rc = _qce_sps_transfer(pce_dev);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (pce_dev->src_nents) {
+		dma_unmap_sg(pce_dev->pdev, sreq->src,
+				pce_dev->src_nents, DMA_TO_DEVICE);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
+static int __qce_get_device_tree_data(struct platform_device *pdev,
+		struct qce_device *pce_dev)
+{
+	struct resource *resource;
+	int rc = 0;
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-pipe-pair",
+				&pce_dev->ce_sps.pipe_pair_index)) {
+		pr_err("Failed to get bam pipe pair information.\n");
+		return -EINVAL;
+	} else {
+		pr_warn("bam_pipe_pair=0x%x", pce_dev->ce_sps.pipe_pair_index);
+	}
+	pce_dev->ce_sps.dest_pipe_index	= 2 * pce_dev->ce_sps.pipe_pair_index;
+	pce_dev->ce_sps.src_pipe_index	= pce_dev->ce_sps.dest_pipe_index + 1;
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-base");
+	if (resource) {
+		pce_dev->phy_iobase = resource->start;
+		pce_dev->iobase = ioremap_nocache(resource->start,
+					resource_size(resource));
+		if (!pce_dev->iobase) {
+			pr_err("Cannot map CRYPTO io memory\n");
+			return -ENOMEM;
+		}
+	} else {
+		pr_err("CRYPTO HW mem unavailable.\n");
+		return -ENODEV;
+	}
+	pr_warn("ce_phy_reg_base=0x%x  ", pce_dev->phy_iobase);
+	pr_warn("ce_virt_reg_base=0x%x\n", (uint32_t)pce_dev->iobase);
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-bam-base");
+	if (resource) {
+		pce_dev->ce_sps.bam_mem = resource->start;
+		pce_dev->ce_sps.bam_iobase = ioremap_nocache(resource->start,
+					resource_size(resource));
+		if (!pce_dev->ce_sps.bam_iobase) {
+			rc = -ENOMEM;
+			pr_err("Cannot map BAM io memory\n");
+			goto err_getting_bam_info;
+		}
+	} else {
+		pr_err("CRYPTO BAM mem unavailable.\n");
+		rc = -ENODEV;
+		goto err_getting_bam_info;
+	}
+	pr_warn("ce_bam_phy_reg_base=0x%x  ", pce_dev->ce_sps.bam_mem);
+	pr_warn("ce_bam_virt_reg_base=0x%x\n",
+				(uint32_t)pce_dev->ce_sps.bam_iobase);
+
+	resource  = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (resource) {
+		pce_dev->ce_sps.bam_irq = resource->start;
+		pr_warn("CRYPTO BAM IRQ = %d.\n", pce_dev->ce_sps.bam_irq);
+	} else {
+		pr_err("CRYPTO BAM IRQ unavailable.\n");
+		rc = -ENODEV;
+		goto err_dev;
+	}
+	return rc;
+err_dev:
+	if (pce_dev->ce_sps.bam_iobase)
+		iounmap(pce_dev->ce_sps.bam_iobase);
+
+err_getting_bam_info:
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+	return rc;
+}
+
+static int __qce_init_clk(struct qce_device *pce_dev)
+{
+	int rc = 0;
+	struct clk *ce_core_clk;
+	struct clk *ce_clk;
+	struct clk *ce_core_src_clk;
+
+	/* Get CE core src clk. */
+	ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
+	if (!IS_ERR(ce_core_src_clk)) {
+		pce_dev->ce_core_src_clk = ce_core_src_clk;
+
+		/* Set the core src clk @100MHz */
+		rc = clk_set_rate(pce_dev->ce_core_src_clk, 100000000);
+		if (rc) {
+			clk_put(pce_dev->ce_core_src_clk);
+			pr_err("Unable to set the core src clk @100MHz.\n");
+			goto err_clk;
+		}
+	} else {
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		pce_dev->ce_core_src_clk = NULL;
+	}
+
+	/* Get CE core clk */
+	ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
+	if (IS_ERR(ce_core_clk)) {
+		rc = PTR_ERR(ce_core_clk);
+		pr_err("Unable to get CE core clk\n");
+		if (pce_dev->ce_core_src_clk != NULL)
+			clk_put(pce_dev->ce_core_src_clk);
+		goto err_clk;
+	}
+	pce_dev->ce_core_clk = ce_core_clk;
+
+	/* Get CE Interface clk */
+	ce_clk = clk_get(pce_dev->pdev, "iface_clk");
+	if (IS_ERR(ce_clk)) {
+		rc = PTR_ERR(ce_clk);
+		pr_err("Unable to get CE interface clk\n");
+		if (pce_dev->ce_core_src_clk != NULL)
+			clk_put(pce_dev->ce_core_src_clk);
+		clk_put(pce_dev->ce_core_clk);
+		goto err_clk;
+	}
+	pce_dev->ce_clk = ce_clk;
+
+	/* Enable CE core clk */
+	rc = clk_prepare_enable(pce_dev->ce_core_clk);
+	if (rc) {
+		pr_err("Unable to enable/prepare CE core clk\n");
+		if (pce_dev->ce_core_src_clk != NULL)
+			clk_put(pce_dev->ce_core_src_clk);
+		clk_put(pce_dev->ce_core_clk);
+		clk_put(pce_dev->ce_clk);
+		goto err_clk;
+	} else {
+		/* Enable CE clk */
+		rc = clk_prepare_enable(pce_dev->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			clk_disable_unprepare(pce_dev->ce_core_clk);
+			if (pce_dev->ce_core_src_clk != NULL)
+				clk_put(pce_dev->ce_core_src_clk);
+			clk_put(pce_dev->ce_core_clk);
+			clk_put(pce_dev->ce_clk);
+			goto err_clk;
+		}
+	}
+err_clk:
+	if (rc)
+		pr_err("Unable to init CE clks, rc = %d\n", rc);
+	return rc;
+}
+
+/* crypto engine open function. */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+	struct qce_device *pce_dev;
+
+	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+	if (!pce_dev) {
+		*rc = -ENOMEM;
+		pr_err("Cannot allocate memory: %d\n", *rc);
+		return NULL;
+	}
+	pce_dev->pdev = &pdev->dev;
+
+	if (pdev->dev.of_node) {
+		*rc = __qce_get_device_tree_data(pdev, pce_dev);
+		if (*rc)
+			goto err_pce_dev;
+	} else {
+		*rc = -EINVAL;
+		pr_err("Device Node not found.\n");
+		goto err_pce_dev;
+	}
+
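+	/*
+	 * A single coherent region holds the SPS iovec tables, the
+	 * pre-built command lists and the result dump area, carved out in
+	 * qce_setup_ce_sps_data().
+	 */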
+	pce_dev->memsize = 9 * PAGE_SIZE;
+	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+			pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
+	if (pce_dev->coh_vmem == NULL) {
+		*rc = -ENOMEM;
+		pr_err("Cannot allocate coherent memory for sps data\n");
+		goto err_iobase;
+	}
+
+	*rc = __qce_init_clk(pce_dev);
+	if (*rc)
+		goto err_mem;
+
+	if (_probe_ce_engine(pce_dev)) {
+		*rc = -ENXIO;
+		goto err;
+	}
+	*rc = 0;
+	qce_setup_ce_sps_data(pce_dev);
+	qce_sps_init(pce_dev);
+
+	return pce_dev;
+err:
+	clk_disable_unprepare(pce_dev->ce_clk);
+	clk_disable_unprepare(pce_dev->ce_core_clk);
+
+	if (pce_dev->ce_core_src_clk != NULL)
+		clk_put(pce_dev->ce_core_src_clk);
+	clk_put(pce_dev->ce_clk);
+	clk_put(pce_dev->ce_core_clk);
+err_mem:
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+			pce_dev->coh_vmem, pce_dev->coh_pmem);
+err_iobase:
+	if (pce_dev->ce_sps.bam_iobase)
+		iounmap(pce_dev->ce_sps.bam_iobase);
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+err_pce_dev:
+	kfree(pce_dev);
+	return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/* crypto engine close function. */
+int qce_close(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+				pce_dev->coh_vmem, pce_dev->coh_pmem);
+
+	clk_disable_unprepare(pce_dev->ce_clk);
+	clk_disable_unprepare(pce_dev->ce_core_clk);
+	if (pce_dev->ce_core_src_clk != NULL)
+		clk_put(pce_dev->ce_core_src_clk);
+	clk_put(pce_dev->ce_clk);
+	clk_put(pce_dev->ce_core_clk);
+
+	qce_sps_exit(pce_dev);
+	kfree(handle);
+
+	return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+
+	if (ce_support == NULL)
+		return -EINVAL;
+
+	ce_support->sha1_hmac_20 = false;
+	ce_support->sha1_hmac = false;
+	ce_support->sha256_hmac = false;
+	ce_support->sha_hmac = true;
+	ce_support->cmac  = true;
+	ce_support->aes_key_192 = false;
+	ce_support->aes_xts = true;
+	ce_support->ota = false;
+	ce_support->bam = true;
+	if (pce_dev->ce_sps.minor_version) {
+		ce_support->aligned_only = false;
+		ce_support->aes_ccm = true;
+	} else {
+		ce_support->aligned_only = true;
+		ce_support->aes_ccm = false;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+static int __init qce_init(void)
+{
+	bam_registry.handle = 0;
+	bam_registry.cnt = 0;
+	return 0;
+}
+
+static void __exit qce_exit(void)
+{
+	bam_registry.handle = 0;
+	bam_registry.cnt = 0;
+}
+
+module_init(qce_init);
+module_exit(qce_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto Engine driver");
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
new file mode 100644
index 0000000..c9eba82
--- /dev/null
+++ b/drivers/crypto/msm/qce50.h
@@ -0,0 +1,148 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
+#define _DRIVERS_CRYPTO_MSM_QCE50_H_
+
+#include <mach/sps.h>
+
+/* MAX Data xfer block size between BAM and CE */
+#define MAX_CE_BAM_BURST_SIZE   0x40
+#define QCEBAM_BURST_SIZE	MAX_CE_BAM_BURST_SIZE
+#define MAX_BAM_DESCRIPTORS	(0x40 - 1)
+
+#define GET_VIRT_ADDR(x)  \
+		((uint32_t)pce_dev->coh_vmem +			\
+		((uint32_t)x - pce_dev->coh_pmem))
+#define GET_PHYS_ADDR(x)  \
+		(pce_dev->coh_pmem + (x - (uint32_t)pce_dev->coh_vmem))
+
+#define CRYPTO_REG_SIZE 4
+#define NUM_OF_CRYPTO_AUTH_IV_REG 16
+#define NUM_OF_CRYPTO_CNTR_IV_REG 4
+#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4
+#define CRYPTO_TOTAL_REGISTERS_DUMPED   26
+#define CRYPTO_RESULT_DUMP_SIZE   \
+	ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
+	QCEBAM_BURST_SIZE)
+
+/* QCE max number of descriptors in a descriptor list */
+#define QCE_MAX_NUM_DESC    128
+#define SPS_MAX_PKT_SIZE  (64 * 1024  - 1)
+
+/* State of consumer/producer Pipe */
+enum qce_pipe_st_enum {
+	QCE_PIPE_STATE_IDLE = 0,
+	QCE_PIPE_STATE_IN_PROG = 1,
+	QCE_PIPE_STATE_COMP = 2,
+	QCE_PIPE_STATE_LAST
+};
+
+struct qce_sps_ep_conn_data {
+	struct sps_pipe			*pipe;
+	struct sps_connect		connect;
+	struct sps_register_event	event;
+};
+
+/* CE Result DUMP format */
+struct ce_result_dump_format {
+	uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
+	uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
+	uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
+	uint32_t status;
+	uint32_t status2;
+};
+
+struct qce_cmdlist_info {
+
+	uint32_t cmdlist;
+	struct sps_command_element *crypto_cfg;
+	struct sps_command_element *encr_seg_cfg;
+	struct sps_command_element *encr_seg_size;
+	struct sps_command_element *encr_seg_start;
+	struct sps_command_element *encr_key;
+	struct sps_command_element *encr_xts_key;
+	struct sps_command_element *encr_cntr_iv;
+	struct sps_command_element *encr_ccm_cntr_iv;
+	struct sps_command_element *encr_mask;
+	struct sps_command_element *encr_xts_du_size;
+
+	struct sps_command_element *auth_seg_cfg;
+	struct sps_command_element *auth_seg_size;
+	struct sps_command_element *auth_seg_start;
+	struct sps_command_element *auth_key;
+	struct sps_command_element *auth_iv;
+	struct sps_command_element *auth_nonce_info;
+	struct sps_command_element *auth_bytecount;
+	struct sps_command_element *seg_size;
+	struct sps_command_element *go_proc;
+	uint32_t size;
+};
+
+struct qce_cmdlistptr_ops {
+	struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_128_ecb;
+	struct qce_cmdlist_info cipher_aes_256_ecb;
+	struct qce_cmdlist_info cipher_aes_128_xts;
+	struct qce_cmdlist_info cipher_aes_256_xts;
+	struct qce_cmdlist_info cipher_des_cbc;
+	struct qce_cmdlist_info cipher_des_ecb;
+	struct qce_cmdlist_info cipher_3des_cbc;
+	struct qce_cmdlist_info cipher_3des_ecb;
+	struct qce_cmdlist_info auth_sha1;
+	struct qce_cmdlist_info auth_sha256;
+	struct qce_cmdlist_info auth_sha1_hmac;
+	struct qce_cmdlist_info auth_sha256_hmac;
+	struct qce_cmdlist_info auth_aes_128_cmac;
+	struct qce_cmdlist_info auth_aes_256_cmac;
+	struct qce_cmdlist_info aead_sha1_hmac;
+	struct qce_cmdlist_info aead_aes_128_ccm;
+	struct qce_cmdlist_info aead_aes_256_ccm;
+	struct qce_cmdlist_info unlock_all_pipes;
+};
+
+
+/* DM data structure with buffers, command lists & command pointer lists */
+struct ce_sps_data {
+
+	uint32_t			bam_irq;
+	uint32_t			bam_mem;
+	void __iomem			*bam_iobase;
+
+	struct qce_sps_ep_conn_data	producer;
+	struct qce_sps_ep_conn_data	consumer;
+	struct sps_event_notify		notify;
+	struct scatterlist		*src;
+	struct scatterlist		*dst;
+	unsigned int			pipe_pair_index;
+	unsigned int			src_pipe_index;
+	unsigned int			dest_pipe_index;
+	uint32_t			bam_handle;
+
+	enum qce_pipe_st_enum consumer_state;	/* Consumer pipe state */
+	enum qce_pipe_st_enum producer_state;	/* Producer pipe state */
+
+	int consumer_status;		/* consumer pipe status */
+	int producer_status;		/* producer pipe status */
+
+	struct sps_transfer in_transfer;
+	struct sps_transfer out_transfer;
+
+	int ce_burst_size;
+
+	struct qce_cmdlistptr_ops cmdlistptr;
+	uint32_t result_dump;
+	uint32_t ignore_buffer;
+	struct ce_result_dump_format *result;
+	uint32_t minor_version;
+};
+#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H_ */
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index fecce3f..63f7fd9 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -2016,21 +2016,8 @@
 	struct qcedev_control *podev;
 	struct msm_ce_hw_support *platform_support;
 
-	if (pdev->id >= MAX_QCE_DEVICE) {
-		pr_err("%s: device id %d  exceeds allowed %d\n",
-			__func__, pdev->id, MAX_QCE_DEVICE);
-		return -ENOENT;
-	}
-	podev = &qce_dev[pdev->id];
+	podev = &qce_dev[0];
 
-	platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
-	podev->platform_support.ce_shared = platform_support->ce_shared;
-	podev->platform_support.shared_ce_resource =
-				platform_support->shared_ce_resource;
-	podev->platform_support.hw_key_support =
-				platform_support->hw_key_support;
-	podev->platform_support.bus_scale_table =
-				platform_support->bus_scale_table;
 	podev->ce_lock_count = 0;
 	podev->high_bw_req_count = 0;
 	INIT_LIST_HEAD(&podev->ready_commands);
@@ -2050,7 +2037,6 @@
 	podev->qce = handle;
 	podev->pdev = pdev;
 	platform_set_drvdata(pdev, podev);
-	qce_hw_support(podev->qce, &podev->ce_support);
 
 	if (podev->platform_support.bus_scale_table != NULL) {
 		podev->bus_scale_handle =
@@ -2065,7 +2051,25 @@
 		}
 	}
 	rc = misc_register(&podev->miscdevice);
-
+	qce_hw_support(podev->qce, &podev->ce_support);
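+	/* BAM-based (CE 5.0) targets do not use the legacy platform data. */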
+	if (podev->ce_support.bam) {
+		podev->platform_support.ce_shared = 0;
+		podev->platform_support.shared_ce_resource = 0;
+		podev->platform_support.hw_key_support = 0;
+		podev->platform_support.bus_scale_table = NULL;
+		podev->platform_support.sha_hmac = 1;
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		podev->platform_support.ce_shared = platform_support->ce_shared;
+		podev->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+		podev->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+		podev->platform_support.bus_scale_table =
+				platform_support->bus_scale_table;
+		podev->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
 	if (rc >= 0)
 		return 0;
 	else
@@ -2230,9 +2234,7 @@
 }
 
 MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
 MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
-MODULE_VERSION("1.27");
 
 module_init(qcedev_init);
 module_exit(qcedev_exit);
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index c11c36e..7fc5cab 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -228,6 +228,13 @@
 	enum qce_cipher_alg_enum alg;
 	enum qce_cipher_dir_enum dir;
 	enum qce_cipher_mode_enum mode;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr */
+	struct scatterlist *orig_dst;	/* Original dst sg ptr */
+	struct scatterlist dsg;		/* Dest data sg */
+	struct scatterlist ssg;		/* Source data sg */
+	unsigned char *data;		/* Incoming data pointer */
+
 };
 
 #define SHA_MAX_BLOCK_SIZE      SHA256_BLOCK_SIZE
@@ -275,6 +282,11 @@
 	};
 	struct scatterlist *src;
 	uint32_t nbytes;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr */
+	struct scatterlist dsg;		/* Data sg */
+	unsigned char *data;		/* Incoming data pointer */
+	unsigned char *data2;		/* Updated data pointer */
 };
 
 static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
@@ -497,9 +509,6 @@
 				&sha_ctx->ahash_req_complete);
 	crypto_ahash_clear_flags(ahash, ~0);
 
-	if (sha_ctx->cp->platform_support.bus_scale_table != NULL)
-		qcrypto_ce_high_bw_req(sha_ctx->cp, true);
-
 	return 0;
 };
 
@@ -785,7 +794,6 @@
 	dev_info(&cp->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
 				areq, ret);
 #endif
-
 	if (digest) {
 		memcpy(sha_ctx->digest, digest, diglen);
 		memcpy(areq->result, digest, diglen);
@@ -819,6 +827,10 @@
 		cp->res = 0;
 		pstat->sha_op_success++;
 	}
+	if (cp->ce_support.aligned_only)  {
+		areq->src = rctx->orig_src;
+		kfree(rctx->data);
+	}
 
 	if (cp->platform_support.ce_shared)
 		schedule_work(&cp->unlock_ce_ws);
@@ -850,6 +862,24 @@
 		cp->res = 0;
 		pstat->ablk_cipher_op_success++;
 	}
+
+	if (cp->ce_support.aligned_only)  {
+		struct qcrypto_cipher_req_ctx *rctx;
+		struct scatterlist *sg;
+		uint32_t bytes = 0;
+
+		rctx = ablkcipher_request_ctx(areq);
+		areq->src = rctx->orig_src;
+		areq->dst = rctx->orig_dst;
+
+		for (sg = areq->dst; bytes != areq->nbytes; sg++) {
+			memcpy(sg_virt(sg), ((char *)rctx->data + bytes),
+								sg->length);
+			bytes += sg->length;
+		}
+		kfree(rctx->data);
+	}
+
 	if (cp->platform_support.ce_shared)
 		schedule_work(&cp->unlock_ce_ws);
 	tasklet_schedule(&cp->done_tasklet);
@@ -871,6 +901,30 @@
 	rctx = aead_request_ctx(areq);
 
 	if (rctx->mode == QCE_MODE_CCM) {
+		if (cp->ce_support.aligned_only)  {
+			struct qcrypto_cipher_req_ctx *rctx;
+			struct scatterlist *sg;
+			uint32_t bytes = 0;
+			uint32_t nbytes = 0;
+
+			rctx = aead_request_ctx(areq);
+			areq->src = rctx->orig_src;
+			areq->dst = rctx->orig_dst;
+			if (rctx->dir == QCE_ENCRYPT)
+				nbytes = areq->cryptlen +
+						crypto_aead_authsize(aead);
+			else
+				nbytes = areq->cryptlen -
+						crypto_aead_authsize(aead);
+
+			for (sg = areq->dst; bytes != nbytes; sg++) {
+				memcpy(sg_virt(sg),
+				((char *)rctx->data + rctx->assoclen + bytes),
+								sg->length);
+				bytes += sg->length;
+			}
+			kfree(rctx->data);
+		}
 		kzfree(rctx->assoc);
 		areq->assoc = rctx->assoc_sg;
 		areq->assoclen = rctx->assoclen;
@@ -997,17 +1051,222 @@
 	return 0;
 }
 
+static int _qcrypto_process_ablkcipher(struct crypto_priv *cp,
+				struct crypto_async_request *async_req)
+{
+	struct qce_req qreq;
+	int ret;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct ablkcipher_request *req;
+	struct crypto_ablkcipher *tfm;
+
+	req = container_of(async_req, struct ablkcipher_request, base);
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+	rctx = ablkcipher_request_ctx(req);
+	tfm = crypto_ablkcipher_reqtfm(req);
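+	/*
+	 * When the engine only handles aligned, contiguous buffers
+	 * (ce_support.aligned_only), linearize the request into a bounce
+	 * buffer and substitute a single-entry sg for src and dst; the
+	 * completion handler copies the result back and restores the
+	 * original sg pointers.
+	 */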
+	if (cp->ce_support.aligned_only) {
+		uint32_t bytes = 0;
+		struct scatterlist *sg = req->src;
+
+		rctx->orig_src = req->src;
+		rctx->orig_dst = req->dst;
+		rctx->data = kzalloc((req->nbytes + 64), GFP_KERNEL);
+		for (sg = req->src; bytes != req->nbytes; sg++) {
+			memcpy(((char *)rctx->data + bytes),
+					sg_virt(sg), sg->length);
+			bytes += sg->length;
+		}
+		sg_set_buf(&rctx->dsg, rctx->data, req->nbytes);
+		sg_mark_end(&rctx->dsg);
+		rctx->iv = req->info;
+
+		req->src = &rctx->dsg;
+		req->dst = &rctx->dsg;
+
+	}
+	qreq.op = QCE_REQ_ABLK_CIPHER;
+	qreq.qce_cb = _qce_ablk_cipher_complete;
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.iv = req->info;
+	qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
+	qreq.cryptlen = req->nbytes;
+	qreq.use_pmem = 0;
+
+	if ((cipher_ctx->enc_key_len == 0) &&
+			(cp->platform_support.hw_key_support == 0))
+		ret = -EINVAL;
+	else
+		ret =  qce_ablk_cipher_req(cp->qce, &qreq);
+
+	return ret;
+}
+
+static int _qcrypto_process_ahash(struct crypto_priv *cp,
+				struct crypto_async_request *async_req)
+{
+	struct ahash_request *req;
+	struct qce_sha_req sreq;
+	struct qcrypto_sha_ctx *sha_ctx;
+	int ret = 0;
+
+	req = container_of(async_req,
+				struct ahash_request, base);
+	sha_ctx = crypto_tfm_ctx(async_req->tfm);
+
+	sreq.qce_cb = _qce_ahash_complete;
+	sreq.digest =  &sha_ctx->digest[0];
+	sreq.src = req->src;
+	sreq.auth_data[0] = sha_ctx->byte_count[0];
+	sreq.auth_data[1] = sha_ctx->byte_count[1];
+	sreq.auth_data[2] = sha_ctx->byte_count[2];
+	sreq.auth_data[3] = sha_ctx->byte_count[3];
+	sreq.first_blk = sha_ctx->first_blk;
+	sreq.last_blk = sha_ctx->last_blk;
+	sreq.size = req->nbytes;
+	sreq.areq = req;
+
+	switch (sha_ctx->alg) {
+	case QCE_HASH_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA1_HMAC:
+		sreq.alg = QCE_HASH_SHA1_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	case QCE_HASH_SHA256_HMAC:
+		sreq.alg = QCE_HASH_SHA256_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	default:
+		break;
+	};
+	ret =  qce_process_sha_req(cp->qce, &sreq);
+
+	return ret;
+}
+
+static int _qcrypto_process_aead(struct crypto_priv *cp,
+				struct crypto_async_request *async_req)
+{
+	struct qce_req qreq;
+	int ret = 0;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct aead_request *req = container_of(async_req,
+				struct aead_request, base);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+	rctx = aead_request_ctx(req);
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+
+	qreq.op = QCE_REQ_AEAD;
+	qreq.qce_cb = _qce_aead_complete;
+
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.iv = rctx->iv;
+
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.authkey = cipher_ctx->auth_key;
+	qreq.authklen = cipher_ctx->auth_key_len;
+	qreq.authsize = crypto_aead_authsize(aead);
+	qreq.ivsize =  crypto_aead_ivsize(aead);
+	if (qreq.mode == QCE_MODE_CCM) {
+		if (qreq.dir == QCE_ENCRYPT)
+			qreq.cryptlen = req->cryptlen;
+		else
+			qreq.cryptlen = req->cryptlen -
+						qreq.authsize;
+		/* Get NONCE */
+		ret = qccrypto_set_aead_ccm_nonce(&qreq);
+		if (ret)
+			return ret;
+
+		/* Format Associated data    */
+		ret = qcrypto_aead_ccm_format_adata(&qreq,
+						req->assoclen,
+						req->assoc);
+		if (ret)
+			return ret;
+
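+		/*
+		 * Aligned-only engines get the formatted assoc data and the
+		 * payload copied into one bounce buffer; the completion
+		 * handler copies the ciphertext back into the original dst.
+		 */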
+		if (cp->ce_support.aligned_only) {
+			uint32_t bytes = 0;
+			struct scatterlist *sg = req->src;
+
+			rctx->orig_src = req->src;
+			rctx->orig_dst = req->dst;
+			rctx->data = kzalloc((req->cryptlen + qreq.assoclen +
+					qreq.authsize + 64*2), GFP_KERNEL);
+
+			memcpy((char *)rctx->data, qreq.assoc, qreq.assoclen);
+
+			for (sg = req->src; bytes != req->cryptlen; sg++) {
+				memcpy((rctx->data + bytes + qreq.assoclen),
+						sg_virt(sg), sg->length);
+				bytes += sg->length;
+			}
+			sg_set_buf(&rctx->ssg, rctx->data, req->cryptlen +
+							qreq.assoclen);
+			sg_mark_end(&rctx->ssg);
+
+			if (qreq.dir == QCE_ENCRYPT)
+				sg_set_buf(&rctx->dsg, rctx->data,
+					qreq.assoclen + qreq.cryptlen +
+					ALIGN(qreq.authsize, 64));
+			else
+				sg_set_buf(&rctx->dsg, rctx->data,
+						qreq.assoclen + req->cryptlen +
+						qreq.authsize);
+			sg_mark_end(&rctx->dsg);
+
+			req->src = &rctx->ssg;
+			req->dst = &rctx->dsg;
+		}
+		/*
+		 * Save the original associated data
+		 * length and sg
+		 */
+		rctx->assoc_sg  = req->assoc;
+		rctx->assoclen  = req->assoclen;
+		rctx->assoc  = qreq.assoc;
+		/*
+		 * update req with new formatted associated
+		 * data info
+		 */
+		req->assoc = &rctx->asg;
+		req->assoclen = qreq.assoclen;
+		sg_set_buf(req->assoc, qreq.assoc,
+					req->assoclen);
+		sg_mark_end(req->assoc);
+	}
+	ret =  qce_aead_req(cp->qce, &qreq);
+
+	return ret;
+}
+
 static void _start_qcrypto_process(struct crypto_priv *cp)
 {
 	struct crypto_async_request *async_req = NULL;
 	struct crypto_async_request *backlog = NULL;
 	unsigned long flags;
 	u32 type;
-	struct qce_req qreq;
 	int ret;
-	struct qcrypto_cipher_req_ctx *rctx;
-	struct qcrypto_cipher_ctx *cipher_ctx;
-	struct qcrypto_sha_ctx *sha_ctx;
 	struct crypto_stat *pstat;
 
 	pstat = &_qcrypto_stat[cp->pdev->id];
@@ -1026,139 +1285,21 @@
 		backlog->complete(backlog, -EINPROGRESS);
 	type = crypto_tfm_alg_type(async_req->tfm);
 
-	if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) {
-		struct ablkcipher_request *req;
-		struct crypto_ablkcipher *tfm;
-
-		req = container_of(async_req, struct ablkcipher_request, base);
-		cipher_ctx = crypto_tfm_ctx(async_req->tfm);
-		rctx = ablkcipher_request_ctx(req);
-		tfm = crypto_ablkcipher_reqtfm(req);
-
-		qreq.op = QCE_REQ_ABLK_CIPHER;
-		qreq.qce_cb = _qce_ablk_cipher_complete;
-		qreq.areq = req;
-		qreq.alg = rctx->alg;
-		qreq.dir = rctx->dir;
-		qreq.mode = rctx->mode;
-		qreq.enckey = cipher_ctx->enc_key;
-		qreq.encklen = cipher_ctx->enc_key_len;
-		qreq.iv = req->info;
-		qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
-		qreq.cryptlen = req->nbytes;
-		qreq.use_pmem = 0;
-
-		if ((cipher_ctx->enc_key_len == 0) &&
-				(cp->platform_support.hw_key_support == 0))
-			ret = -EINVAL;
-		else
-			ret =  qce_ablk_cipher_req(cp->qce, &qreq);
-	} else {
-		if (type == CRYPTO_ALG_TYPE_AHASH) {
-
-			struct ahash_request *req;
-			struct qce_sha_req sreq;
-
-			req = container_of(async_req,
-						struct ahash_request, base);
-			sha_ctx = crypto_tfm_ctx(async_req->tfm);
-
-			sreq.qce_cb = _qce_ahash_complete;
-			sreq.digest =  &sha_ctx->digest[0];
-			sreq.src = req->src;
-			sreq.auth_data[0] = sha_ctx->byte_count[0];
-			sreq.auth_data[1] = sha_ctx->byte_count[1];
-			sreq.auth_data[2] = sha_ctx->byte_count[2];
-			sreq.auth_data[3] = sha_ctx->byte_count[3];
-			sreq.first_blk = sha_ctx->first_blk;
-			sreq.last_blk = sha_ctx->last_blk;
-			sreq.size = req->nbytes;
-			sreq.areq = req;
-
-			switch (sha_ctx->alg) {
-			case QCE_HASH_SHA1:
-				sreq.alg = QCE_HASH_SHA1;
-				sreq.authkey = NULL;
-				break;
-			case QCE_HASH_SHA256:
-				sreq.alg = QCE_HASH_SHA256;
-				sreq.authkey = NULL;
-				break;
-			case QCE_HASH_SHA1_HMAC:
-				sreq.alg = QCE_HASH_SHA1_HMAC;
-				sreq.authkey = &sha_ctx->authkey[0];
-				break;
-			case QCE_HASH_SHA256_HMAC:
-				sreq.alg = QCE_HASH_SHA256_HMAC;
-				sreq.authkey = &sha_ctx->authkey[0];
-				break;
-			default:
-				break;
-			};
-			ret =  qce_process_sha_req(cp->qce, &sreq);
-
-		} else {
-			struct aead_request *req = container_of(async_req,
-						struct aead_request, base);
-			struct crypto_aead *aead = crypto_aead_reqtfm(req);
-
-			rctx = aead_request_ctx(req);
-			cipher_ctx = crypto_tfm_ctx(async_req->tfm);
-
-			qreq.op = QCE_REQ_AEAD;
-			qreq.qce_cb = _qce_aead_complete;
-
-			qreq.areq = req;
-			qreq.alg = rctx->alg;
-			qreq.dir = rctx->dir;
-			qreq.mode = rctx->mode;
-			qreq.iv = rctx->iv;
-
-			qreq.enckey = cipher_ctx->enc_key;
-			qreq.encklen = cipher_ctx->enc_key_len;
-			qreq.authkey = cipher_ctx->auth_key;
-			qreq.authklen = cipher_ctx->auth_key_len;
-			qreq.authsize = crypto_aead_authsize(aead);
-			qreq.ivsize =  crypto_aead_ivsize(aead);
-			if (qreq.mode == QCE_MODE_CCM) {
-				if (qreq.dir == QCE_ENCRYPT)
-					qreq.cryptlen = req->cryptlen;
-				else
-					qreq.cryptlen = req->cryptlen -
-								qreq.authsize;
-				/* Get NONCE */
-				ret = qccrypto_set_aead_ccm_nonce(&qreq);
-				if (ret)
-					goto done;
-				/* Format Associated data    */
-				ret = qcrypto_aead_ccm_format_adata(&qreq,
-								req->assoclen,
-								req->assoc);
-				if (ret)
-					goto done;
-				/*
-				 * Save the original associated data
-				 * length and sg
-				 */
-				rctx->assoc_sg  = req->assoc;
-				rctx->assoclen  = req->assoclen;
-				rctx->assoc  = qreq.assoc;
-				/*
-				 * update req with new formatted associated
-				 * data info
-				 */
-				req->assoc = &rctx->asg;
-				req->assoclen = qreq.assoclen;
-				sg_set_buf(req->assoc, qreq.assoc,
-							req->assoclen);
-				sg_mark_end(req->assoc);
-			}
-			ret =  qce_aead_req(cp->qce, &qreq);
-		}
+	switch (type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		ret = _qcrypto_process_ablkcipher(cp, async_req);
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		ret = _qcrypto_process_ahash(cp, async_req);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		ret = _qcrypto_process_aead(cp, async_req);
+		break;
+	default:
+		ret = -EINVAL;
 	};
-done:
-	if (ret) {
 
+	if (ret) {
 		spin_lock_irqsave(&cp->lock, flags);
 		cp->req = NULL;
 		spin_unlock_irqrestore(&cp->lock, flags);
@@ -2118,6 +2259,26 @@
 	return 0;
 }
 
+static void _copy_source(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *srctx = NULL;
+	uint32_t bytes = 0;
+	struct scatterlist *sg = req->src;
+
+	srctx = ahash_request_ctx(req);
+	srctx->orig_src = req->src;
+	srctx->data = kzalloc((req->nbytes + 64), GFP_KERNEL);
+	for (sg = req->src; bytes != req->nbytes;
+						sg++) {
+		memcpy(((char *)srctx->data + bytes),
+			sg_virt(sg), sg->length);
+		bytes += sg->length;
+	}
+	sg_set_buf(&srctx->dsg, srctx->data,
+				req->nbytes);
+	sg_mark_end(&srctx->dsg);
+	req->src = &srctx->dsg;
+}
 
 static int _sha_update(struct ahash_request  *req, uint32_t sha_block_size)
 {
@@ -2198,23 +2359,55 @@
 	}
 
 	if (sha_ctx->trailing_buf_len) {
-		num_sg = end_src + 2;
-		sha_ctx->sg = kzalloc(num_sg * (sizeof(struct scatterlist)),
+		if (cp->ce_support.aligned_only)  {
+			sha_ctx->sg = kzalloc(sizeof(struct scatterlist),
 								GFP_KERNEL);
-		if (sha_ctx->sg == NULL) {
-			pr_err("qcrypto Can't Allocate mem: sha_ctx->sg, error %ld\n",
-				PTR_ERR(sha_ctx->sg));
-			return -ENOMEM;
-		}
-
-		sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
+			if (sha_ctx->sg == NULL) {
+				pr_err("MemAlloc fail sha_ctx->sg, error %ld\n",
+						PTR_ERR(sha_ctx->sg));
+				return -ENOMEM;
+			}
+			rctx->data2 = kzalloc((req->nbytes + 64), GFP_KERNEL);
+			if (rctx->data2 == NULL) {
+				pr_err("Mem Alloc fail srctx->data2, err %ld\n",
+							PTR_ERR(rctx->data2));
+				kfree(sha_ctx->sg);
+				return -ENOMEM;
+			}
+			memcpy(rctx->data2, sha_ctx->tmp_tbuf,
 						sha_ctx->trailing_buf_len);
-		for (i = 1; i < num_sg; i++)
-			sg_set_buf(&sha_ctx->sg[i], sg_virt(&req->src[i-1]),
-							req->src[i-1].length);
+			memcpy((rctx->data2 + sha_ctx->trailing_buf_len),
+					rctx->data, req->src[i-1].length);
+			kfree(rctx->data);
+			rctx->data = rctx->data2;
+			sg_set_buf(&sha_ctx->sg[0], rctx->data,
+					(sha_ctx->trailing_buf_len +
+							req->src[i-1].length));
+			req->src = sha_ctx->sg;
+			sg_mark_end(&sha_ctx->sg[0]);
 
-		req->src = sha_ctx->sg;
-		sg_mark_end(&sha_ctx->sg[num_sg - 1]);
+		} else {
+			num_sg = end_src + 2;
+
+			sha_ctx->sg = kzalloc(num_sg *
+				(sizeof(struct scatterlist)), GFP_KERNEL);
+			if (sha_ctx->sg == NULL) {
+				pr_err("MEMalloc fail sha_ctx->sg, error %ld\n",
+							PTR_ERR(sha_ctx->sg));
+				return -ENOMEM;
+			}
+
+			sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf,
+						sha_ctx->trailing_buf_len);
+			for (i = 1; i < num_sg; i++)
+				sg_set_buf(&sha_ctx->sg[i],
+						sg_virt(&req->src[i-1]),
+						req->src[i-1].length);
+
+			req->src = sha_ctx->sg;
+			sg_mark_end(&sha_ctx->sg[num_sg - 1]);
+
+		}
 	} else
 		sg_mark_end(&req->src[end_src]);
 
@@ -2232,6 +2425,11 @@
 {
 	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
 	struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx;
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only)
+		_copy_source(req);
 
 	sha_state_ctx->count += req->nbytes;
 	return _sha_update(req, SHA1_BLOCK_SIZE);
@@ -2241,6 +2439,11 @@
 {
 	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
 	struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx;
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only)
+		_copy_source(req);
 
 	sha_state_ctx->count += req->nbytes;
 	return _sha_update(req, SHA256_BLOCK_SIZE);
@@ -2253,6 +2456,9 @@
 	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
 	int ret = 0;
 
+	if (cp->ce_support.aligned_only)
+		_copy_source(req);
+
 	sha_ctx->last_blk = 1;
 
 	/* save the original req structure fields*/
@@ -2288,10 +2494,13 @@
 	struct crypto_priv *cp = sha_ctx->cp;
 	int ret = 0;
 
+	if (cp->ce_support.aligned_only)
+		_copy_source(req);
+
 	/* save the original req structure fields*/
 	rctx->src = req->src;
 	rctx->nbytes = req->nbytes;
-
+	sha_ctx->first_blk = 1;
 	sha_ctx->last_blk = 1;
 	ret =  _qcrypto_queue_req(cp, &req->base);
 
@@ -2326,7 +2535,7 @@
 	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
 	int ret = 0;
 
-	sha_ctx->in_buf = kzalloc(len, GFP_KERNEL);
+	sha_ctx->in_buf = kzalloc(len + 64, GFP_KERNEL);
 	if (sha_ctx->in_buf == NULL) {
 		pr_err("qcrypto Can't Allocate mem: sha_ctx->in_buf, error %ld\n",
 		PTR_ERR(sha_ctx->in_buf));
@@ -3079,17 +3288,27 @@
 	cp->qce = handle;
 	cp->pdev = pdev;
 	qce_hw_support(cp->qce, &cp->ce_support);
-	platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
-	cp->platform_support.ce_shared = platform_support->ce_shared;
-	cp->platform_support.shared_ce_resource =
+	if (cp->ce_support.bam)	 {
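+		/*
+		 * Crypto engines behind BAM carry no board platform_data;
+		 * use safe defaults (no shared CE, no HW key, no bus
+		 * scaling) and assume SHA-HMAC support.
+		 */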
+		cp->platform_support.ce_shared = 0;
+		cp->platform_support.shared_ce_resource = 0;
+		cp->platform_support.hw_key_support = 0;
+		cp->platform_support.bus_scale_table = NULL;
+		cp->platform_support.sha_hmac = 1;
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		cp->platform_support.ce_shared = platform_support->ce_shared;
+		cp->platform_support.shared_ce_resource =
 				platform_support->shared_ce_resource;
-	cp->platform_support.hw_key_support =
+		cp->platform_support.hw_key_support =
 				platform_support->hw_key_support;
-	cp->platform_support.bus_scale_table =
+		cp->platform_support.bus_scale_table =
 				platform_support->bus_scale_table;
+		cp->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
 	cp->high_bw_req_count = 0;
 	cp->ce_lock_count = 0;
-	cp->platform_support.sha_hmac = platform_support->sha_hmac;
+
 
 	if (cp->platform_support.ce_shared)
 		INIT_WORK(&cp->unlock_ce_ws, qcrypto_unlock_ce);
@@ -3370,6 +3589,4 @@
 module_exit(_qcrypto_exit);
 
 MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Mona Hossain <mhossain@codeaurora.org>");
 MODULE_DESCRIPTION("Qualcomm Crypto driver");
-MODULE_VERSION("1.22");
diff --git a/drivers/crypto/msm/qcryptohw_50.h b/drivers/crypto/msm/qcryptohw_50.h
new file mode 100644
index 0000000..898109e
--- /dev/null
+++ b/drivers/crypto/msm/qcryptohw_50.h
@@ -0,0 +1,501 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+
+
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG			0x1A000
+
+#define CRYPTO_DATA_IN0_REG			0x1A010
+#define CRYPTO_DATA_IN1_REG			0x1A014
+#define CRYPTO_DATA_IN2_REG			0x1A018
+#define CRYPTO_DATA_IN3_REG			0x1A01C
+
+#define CRYPTO_DATA_OUT0_REG			0x1A020
+#define CRYPTO_DATA_OUT1_REG			0x1A024
+#define CRYPTO_DATA_OUT2_REG			0x1A028
+#define CRYPTO_DATA_OUT3_REG			0x1A02C
+
+#define CRYPTO_STATUS_REG			0x1A100
+#define CRYPTO_STATUS2_REG			0x1A100
+#define CRYPTO_ENGINES_AVAIL			0x1A108
+#define CRYPTO_FIFO_SIZES_REG			0x1A10C
+
+#define CRYPTO_SEG_SIZE_REG			0x1A110
+#define CRYPTO_GOPROC_REG			0x1A120
+#define CRYPTO_GOPROC_QC_KEY_REG		0x1B000
+#define CRYPTO_GOPROC_OEM_KEY_REG		0x1C000
+
+#define CRYPTO_ENCR_SEG_CFG_REG			0x1A200
+#define CRYPTO_ENCR_SEG_SIZE_REG		0x1A204
+#define CRYPTO_ENCR_SEG_START_REG		0x1A208
+
+#define CRYPTO_ENCR_KEY0_REG			0x1D000
+#define CRYPTO_ENCR_KEY1_REG			0x1D004
+#define CRYPTO_ENCR_KEY2_REG			0x1D008
+#define CRYPTO_ENCR_KEY3_REG			0x1D00C
+#define CRYPTO_ENCR_KEY4_REG			0x1D010
+#define CRYPTO_ENCR_KEY5_REG			0x1D014
+#define CRYPTO_ENCR_KEY6_REG			0x1D018
+#define CRYPTO_ENCR_KEY7_REG			0x1D01C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG		0x1D020
+#define CRYPTO_ENCR_XTS_KEY1_REG		0x1D024
+#define CRYPTO_ENCR_XTS_KEY2_REG		0x1D028
+#define CRYPTO_ENCR_XTS_KEY3_REG		0x1D02C
+#define CRYPTO_ENCR_XTS_KEY4_REG		0x1D030
+#define CRYPTO_ENCR_XTS_KEY5_REG		0x1D034
+#define CRYPTO_ENCR_XTS_KEY6_REG		0x1D038
+#define CRYPTO_ENCR_XTS_KEY7_REG		0x1D03C
+
+#define CRYPTO_ENCR_PIP0_KEY0_REG		0x1E000
+#define CRYPTO_ENCR_PIP0_KEY1_REG		0x1E004
+#define CRYPTO_ENCR_PIP0_KEY2_REG		0x1E008
+#define CRYPTO_ENCR_PIP0_KEY3_REG		0x1E00C
+#define CRYPTO_ENCR_PIP0_KEY4_REG		0x1E010
+#define CRYPTO_ENCR_PIP0_KEY5_REG		0x1E004
+#define CRYPTO_ENCR_PIP0_KEY6_REG		0x1E008
+#define CRYPTO_ENCR_PIP0_KEY7_REG		0x1E00C
+
+#define CRYPTO_ENCR_PIP1_KEY0_REG		0x1E000
+#define CRYPTO_ENCR_PIP1_KEY1_REG		0x1E004
+#define CRYPTO_ENCR_PIP1_KEY2_REG		0x1E008
+#define CRYPTO_ENCR_PIP1_KEY3_REG		0x1E00C
+#define CRYPTO_ENCR_PIP1_KEY4_REG		0x1E010
+#define CRYPTO_ENCR_PIP1_KEY5_REG		0x1E014
+#define CRYPTO_ENCR_PIP1_KEY6_REG		0x1E018
+#define CRYPTO_ENCR_PIP1_KEY7_REG		0x1E01C
+
+#define CRYPTO_ENCR_PIP2_KEY0_REG		0x1E020
+#define CRYPTO_ENCR_PIP2_KEY1_REG		0x1E024
+#define CRYPTO_ENCR_PIP2_KEY2_REG		0x1E028
+#define CRYPTO_ENCR_PIP2_KEY3_REG		0x1E02C
+#define CRYPTO_ENCR_PIP2_KEY4_REG		0x1E030
+#define CRYPTO_ENCR_PIP2_KEY5_REG		0x1E034
+#define CRYPTO_ENCR_PIP2_KEY6_REG		0x1E038
+#define CRYPTO_ENCR_PIP2_KEY7_REG		0x1E03C
+
+#define CRYPTO_ENCR_PIP3_KEY0_REG		0x1E040
+#define CRYPTO_ENCR_PIP3_KEY1_REG		0x1E044
+#define CRYPTO_ENCR_PIP3_KEY2_REG		0x1E048
+#define CRYPTO_ENCR_PIP3_KEY3_REG		0x1E04C
+#define CRYPTO_ENCR_PIP3_KEY4_REG		0x1E050
+#define CRYPTO_ENCR_PIP3_KEY5_REG		0x1E054
+#define CRYPTO_ENCR_PIP3_KEY6_REG		0x1E058
+#define CRYPTO_ENCR_PIP3_KEY7_REG		0x1E05C
+
+#define CRYPTO_ENCR_PIP0_XTS_KEY0_REG		0x1E200
+#define CRYPTO_ENCR_PIP0_XTS_KEY1_REG		0x1E204
+#define CRYPTO_ENCR_PIP0_XTS_KEY2_REG		0x1E208
+#define CRYPTO_ENCR_PIP0_XTS_KEY3_REG		0x1E20C
+#define CRYPTO_ENCR_PIP0_XTS_KEY4_REG		0x1E210
+#define CRYPTO_ENCR_PIP0_XTS_KEY5_REG		0x1E214
+#define CRYPTO_ENCR_PIP0_XTS_KEY6_REG		0x1E218
+#define CRYPTO_ENCR_PIP0_XTS_KEY7_REG		0x1E21C
+
+#define CRYPTO_ENCR_PIP1_XTS_KEY0_REG		0x1E220
+#define CRYPTO_ENCR_PIP1_XTS_KEY1_REG		0x1E224
+#define CRYPTO_ENCR_PIP1_XTS_KEY2_REG		0x1E228
+#define CRYPTO_ENCR_PIP1_XTS_KEY3_REG		0x1E22C
+#define CRYPTO_ENCR_PIP1_XTS_KEY4_REG		0x1E230
+#define CRYPTO_ENCR_PIP1_XTS_KEY5_REG		0x1E234
+#define CRYPTO_ENCR_PIP1_XTS_KEY6_REG		0x1E238
+#define CRYPTO_ENCR_PIP1_XTS_KEY7_REG		0x1E23C
+
+#define CRYPTO_ENCR_PIP2_XTS_KEY0_REG		0x1E240
+#define CRYPTO_ENCR_PIP2_XTS_KEY1_REG		0x1E244
+#define CRYPTO_ENCR_PIP2_XTS_KEY2_REG		0x1E248
+#define CRYPTO_ENCR_PIP2_XTS_KEY3_REG		0x1E24C
+#define CRYPTO_ENCR_PIP2_XTS_KEY4_REG		0x1E250
+#define CRYPTO_ENCR_PIP2_XTS_KEY5_REG		0x1E254
+#define CRYPTO_ENCR_PIP2_XTS_KEY6_REG		0x1E258
+#define CRYPTO_ENCR_PIP2_XTS_KEY7_REG		0x1E25C
+
+#define CRYPTO_ENCR_PIP3_XTS_KEY0_REG		0x1E260
+#define CRYPTO_ENCR_PIP3_XTS_KEY1_REG		0x1E264
+#define CRYPTO_ENCR_PIP3_XTS_KEY2_REG		0x1E268
+#define CRYPTO_ENCR_PIP3_XTS_KEY3_REG		0x1E26C
+#define CRYPTO_ENCR_PIP3_XTS_KEY4_REG		0x1E270
+#define CRYPTO_ENCR_PIP3_XTS_KEY5_REG		0x1E274
+#define CRYPTO_ENCR_PIP3_XTS_KEY6_REG		0x1E278
+#define CRYPTO_ENCR_PIP3_XTS_KEY7_REG		0x1E27C
+
+
+#define CRYPTO_CNTR0_IV0_REG			0x1A20C
+#define CRYPTO_CNTR1_IV1_REG			0x1A210
+#define CRYPTO_CNTR2_IV2_REG			0x1A214
+#define CRYPTO_CNTR3_IV3_REG			0x1A218
+
+#define CRYPTO_CNTR_MASK_REG			0x1A21C
+
+#define CRYPTO_ENCR_CCM_INT_CNTR0_REG		0x1A220
+#define CRYPTO_ENCR_CCM_INT_CNTR1_REG		0x1A224
+#define CRYPTO_ENCR_CCM_INT_CNTR2_REG		0x1A228
+#define CRYPTO_ENCR_CCM_INT_CNTR3_REG		0x1A22C
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG		0xA1230
+
+#define CRYPTO_AUTH_SEG_CFG_REG			0x1A300
+#define CRYPTO_AUTH_SEG_SIZE_REG		0x1A304
+#define CRYPTO_AUTH_SEG_START_REG		0x1A308
+
+#define CRYPTO_AUTH_KEY0_REG			0x1D040
+#define CRYPTO_AUTH_KEY1_REG			0x1D044
+#define CRYPTO_AUTH_KEY2_REG			0x1D048
+#define CRYPTO_AUTH_KEY3_REG			0x1D04C
+#define CRYPTO_AUTH_KEY4_REG			0x1D050
+#define CRYPTO_AUTH_KEY5_REG			0x1D054
+#define CRYPTO_AUTH_KEY6_REG			0x1D058
+#define CRYPTO_AUTH_KEY7_REG			0x1D05C
+#define CRYPTO_AUTH_KEY8_REG			0x1D060
+#define CRYPTO_AUTH_KEY9_REG			0x1D064
+#define CRYPTO_AUTH_KEY10_REG			0x1D068
+#define CRYPTO_AUTH_KEY11_REG			0x1D06C
+#define CRYPTO_AUTH_KEY12_REG			0x1D070
+#define CRYPTO_AUTH_KEY13_REG			0x1D074
+#define CRYPTO_AUTH_KEY14_REG			0x1D078
+#define CRYPTO_AUTH_KEY15_REG			0x1D07C
+
+#define CRYPTO_AUTH_PIPE0_KEY0_REG		0x1E800
+#define CRYPTO_AUTH_PIPE0_KEY1_REG		0x1E804
+#define CRYPTO_AUTH_PIPE0_KEY2_REG		0x1E808
+#define CRYPTO_AUTH_PIPE0_KEY3_REG		0x1E80C
+#define CRYPTO_AUTH_PIPE0_KEY4_REG		0x1E810
+#define CRYPTO_AUTH_PIPE0_KEY5_REG		0x1E814
+#define CRYPTO_AUTH_PIPE0_KEY6_REG		0x1E818
+#define CRYPTO_AUTH_PIPE0_KEY7_REG		0x1E81C
+#define CRYPTO_AUTH_PIPE0_KEY8_REG		0x1E820
+#define CRYPTO_AUTH_PIPE0_KEY9_REG		0x1E824
+#define CRYPTO_AUTH_PIPE0_KEY10_REG		0x1E828
+#define CRYPTO_AUTH_PIPE0_KEY11_REG		0x1E82C
+#define CRYPTO_AUTH_PIPE0_KEY12_REG		0x1E830
+#define CRYPTO_AUTH_PIPE0_KEY13_REG		0x1E834
+#define CRYPTO_AUTH_PIPE0_KEY14_REG		0x1E838
+#define CRYPTO_AUTH_PIPE0_KEY15_REG		0x1E83C
+
+#define CRYPTO_AUTH_PIPE1_KEY0_REG		0x1E800
+#define CRYPTO_AUTH_PIPE1_KEY1_REG		0x1E804
+#define CRYPTO_AUTH_PIPE1_KEY2_REG		0x1E808
+#define CRYPTO_AUTH_PIPE1_KEY3_REG		0x1E80C
+#define CRYPTO_AUTH_PIPE1_KEY4_REG		0x1E810
+#define CRYPTO_AUTH_PIPE1_KEY5_REG		0x1E814
+#define CRYPTO_AUTH_PIPE1_KEY6_REG		0x1E818
+#define CRYPTO_AUTH_PIPE1_KEY7_REG		0x1E81C
+#define CRYPTO_AUTH_PIPE1_KEY8_REG		0x1E820
+#define CRYPTO_AUTH_PIPE1_KEY9_REG		0x1E824
+#define CRYPTO_AUTH_PIPE1_KEY10_REG		0x1E828
+#define CRYPTO_AUTH_PIPE1_KEY11_REG		0x1E82C
+#define CRYPTO_AUTH_PIPE1_KEY12_REG		0x1E830
+#define CRYPTO_AUTH_PIPE1_KEY13_REG		0x1E834
+#define CRYPTO_AUTH_PIPE1_KEY14_REG		0x1E838
+#define CRYPTO_AUTH_PIPE1_KEY15_REG		0x1E83C
+
+#define CRYPTO_AUTH_PIPE2_KEY0_REG		0x1E840
+#define CRYPTO_AUTH_PIPE2_KEY1_REG		0x1E844
+#define CRYPTO_AUTH_PIPE2_KEY2_REG		0x1E848
+#define CRYPTO_AUTH_PIPE2_KEY3_REG		0x1E84C
+#define CRYPTO_AUTH_PIPE2_KEY4_REG		0x1E850
+#define CRYPTO_AUTH_PIPE2_KEY5_REG		0x1E854
+#define CRYPTO_AUTH_PIPE2_KEY6_REG		0x1E858
+#define CRYPTO_AUTH_PIPE2_KEY7_REG		0x1E85C
+#define CRYPTO_AUTH_PIPE2_KEY8_REG		0x1E860
+#define CRYPTO_AUTH_PIPE2_KEY9_REG		0x1E864
+#define CRYPTO_AUTH_PIPE2_KEY10_REG		0x1E868
+#define CRYPTO_AUTH_PIPE2_KEY11_REG		0x1E86C
+#define CRYPTO_AUTH_PIPE2_KEY12_REG		0x1E870
+#define CRYPTO_AUTH_PIPE2_KEY13_REG		0x1E874
+#define CRYPTO_AUTH_PIPE2_KEY14_REG		0x1E878
+#define CRYPTO_AUTH_PIPE2_KEY15_REG		0x1E87C
+
+#define CRYPTO_AUTH_PIPE3_KEY0_REG		0x1E880
+#define CRYPTO_AUTH_PIPE3_KEY1_REG		0x1E884
+#define CRYPTO_AUTH_PIPE3_KEY2_REG		0x1E888
+#define CRYPTO_AUTH_PIPE3_KEY3_REG		0x1E88C
+#define CRYPTO_AUTH_PIPE3_KEY4_REG		0x1E890
+#define CRYPTO_AUTH_PIPE3_KEY5_REG		0x1E894
+#define CRYPTO_AUTH_PIPE3_KEY6_REG		0x1E898
+#define CRYPTO_AUTH_PIPE3_KEY7_REG		0x1E89C
+#define CRYPTO_AUTH_PIPE3_KEY8_REG		0x1E8A0
+#define CRYPTO_AUTH_PIPE3_KEY9_REG		0x1E8A4
+#define CRYPTO_AUTH_PIPE3_KEY10_REG		0x1E8A8
+#define CRYPTO_AUTH_PIPE3_KEY11_REG		0x1E8AC
+#define CRYPTO_AUTH_PIPE3_KEY12_REG		0x1E8B0
+#define CRYPTO_AUTH_PIPE3_KEY13_REG		0x1E8B4
+#define CRYPTO_AUTH_PIPE3_KEY14_REG		0x1E8B8
+#define CRYPTO_AUTH_PIPE3_KEY15_REG		0x1E8BC
+
+
+#define CRYPTO_AUTH_IV0_REG			0x1A310
+#define CRYPTO_AUTH_IV1_REG			0x1A314
+#define CRYPTO_AUTH_IV2_REG			0x1A318
+#define CRYPTO_AUTH_IV3_REG			0x1A31C
+#define CRYPTO_AUTH_IV4_REG			0x1A320
+#define CRYPTO_AUTH_IV5_REG			0x1A324
+#define CRYPTO_AUTH_IV6_REG			0x1A328
+#define CRYPTO_AUTH_IV7_REG			0x1A32C
+#define CRYPTO_AUTH_IV8_REG			0x1A330
+#define CRYPTO_AUTH_IV9_REG			0x1A334
+#define CRYPTO_AUTH_IV10_REG			0x1A338
+#define CRYPTO_AUTH_IV11_REG			0x1A33C
+#define CRYPTO_AUTH_IV12_REG			0x1A340
+#define CRYPTO_AUTH_IV13_REG			0x1A344
+#define CRYPTO_AUTH_IV14_REG			0x1A348
+#define CRYPTO_AUTH_IV15_REG			0x1A34C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG		0x1A350
+#define CRYPTO_AUTH_INFO_NONCE1_REG		0x1A354
+#define CRYPTO_AUTH_INFO_NONCE2_REG		0x1A358
+#define CRYPTO_AUTH_INFO_NONCE3_REG		0x1A35C
+
+#define CRYPTO_AUTH_BYTECNT0_REG		0x1A390
+#define CRYPTO_AUTH_BYTECNT1_REG		0x1A394
+#define CRYPTO_AUTH_BYTECNT2_REG		0x1A398
+#define CRYPTO_AUTH_BYTECNT3_REG		0x1A39C
+
+#define CRYPTO_AUTH_EXP_MAC0_REG		0x1A3A0
+#define CRYPTO_AUTH_EXP_MAC1_REG		0x1A3A4
+#define CRYPTO_AUTH_EXP_MAC2_REG		0x1A3A8
+#define CRYPTO_AUTH_EXP_MAC3_REG		0x1A3AC
+#define CRYPTO_AUTH_EXP_MAC4_REG		0x1A3B0
+#define CRYPTO_AUTH_EXP_MAC5_REG		0x1A3B4
+#define CRYPTO_AUTH_EXP_MAC6_REG		0x1A3B8
+#define CRYPTO_AUTH_EXP_MAC7_REG		0x1A3BC
+
+#define CRYPTO_CONFIG_REG			0x1A400
+#define CRYPTO_DEBUG_ENABLE_REG			0x1AF00
+#define CRYPTO_DEBUG_REG			0x1AF04
+
+
+
+/* Register bits */
+#define CRYPTO_CORE_STEP_REV_MASK		0xFFFF
+#define CRYPTO_CORE_STEP_REV			0 /* bit 15-0 */
+#define CRYPTO_CORE_MAJOR_REV_MASK		0xFF000000
+#define CRYPTO_CORE_MAJOR_REV			24 /* bit 31-24 */
+#define CRYPTO_CORE_MINOR_REV_MASK		0xFF0000
+#define CRYPTO_CORE_MINOR_REV			16 /* bit 23-16 */
+
+/* status reg  */
+#define CRYPTO_MAC_FAILED			31
+#define CRYPTO_DOUT_SIZE_AVAIL			26 /* bit 30-26 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL			21 /* bit 25-21 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_ACCESS_VIOL			19
+#define CRYPTO_PIPE_ACTIVE_ERR			18
+#define CRYPTO_CFG_CHNG_ERR			17
+#define CRYPTO_DOUT_ERR				16
+#define CRYPTO_DIN_ERR				15
+#define CRYPTO_AXI_ERR				14
+#define CRYPTO_CRYPTO_STATE			10 /* bit 13-10 */
+#define CRYPTO_CRYPTO_STATE_MASK		(0xF << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY			9
+#define CRYPTO_AUTH_BUSY			8
+#define CRYPTO_DOUT_INTR			7
+#define CRYPTO_DIN_INTR				6
+#define CRYPTO_OP_DONE_INTR			5
+#define CRYPTO_ERR_INTR				4
+#define CRYPTO_DOUT_RDY				3
+#define CRYPTO_DIN_RDY				2
+#define CRYPTO_OPERATION_DONE			1
+#define CRYPTO_SW_ERR				0
+
+/* status2 reg  */
+#define CRYPTO_AXI_EXTRA			1
+#define CRYPTO_LOCKED				2
+
+/* config reg */
+#define CRYPTO_REQ_SIZE				17 /* bit 20-17 */
+#define CRYPTO_REQ_SIZE_MASK			(0xF << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_1_BEAT	0
+#define CRYPTO_REQ_SIZE_ENUM_2_BEAT	1
+#define CRYPTO_REQ_SIZE_ENUM_3_BEAT	2
+#define CRYPTO_REQ_SIZE_ENUM_4_BEAT	3
+#define CRYPTO_REQ_SIZE_ENUM_5_BEAT	4
+#define CRYPTO_REQ_SIZE_ENUM_6_BEAT	5
+#define CRYPTO_REQ_SIZE_ENUM_7_BEAT	6
+#define CRYPTO_REQ_SIZE_ENUM_8_BEAT	7
+#define CRYPTO_REQ_SIZE_ENUM_9_BEAT	8
+#define CRYPTO_REQ_SIZE_ENUM_10_BEAT	9
+#define CRYPTO_REQ_SIZE_ENUM_11_BEAT	10
+#define CRYPTO_REQ_SIZE_ENUM_12_BEAT	11
+#define CRYPTO_REQ_SIZE_ENUM_13_BEAT	12
+#define CRYPTO_REQ_SIZE_ENUM_14_BEAT	13
+#define CRYPTO_REQ_SIZE_ENUM_15_BEAT	14
+#define CRYPTO_REQ_SIZE_ENUM_16_BEAT	15
+
+#define CRYPTO_MAX_QUEUED_REQ			14 /* bit 16-14 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK		(0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM_1_QUEUED_REQS	0
+#define CRYPTO_ENUM_2_QUEUED_REQS	1
+#define CRYPTO_ENUM_3_QUEUED_REQS	2
+
+#define CRYPTO_IRQ_ENABLES			10	/* bit 13-10 */
+#define CRYPTO_IRQ_ENABLES_MASK			(0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_LITTLE_ENDIAN_MODE		9
+#define CRYPTO_PIPE_SET_SELECT			5 /* bit 8-5 */
+#define CRYPTO_PIPE_SET_SELECT_MASK		(0xF << CRYPTO_PIPE_SET_SELECT)
+
+#define CRYPTO_HIGH_SPD_EN_N			4
+
+#define CRYPTO_MASK_DOUT_INTR			3
+#define CRYPTO_MASK_DIN_INTR			2
+#define CRYPTO_MASK_OP_DONE_INTR		1
+#define CRYPTO_MASK_ERR_INTR			0
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC			24
+#define CRYPTO_COMP_EXP_MAC_DISABLED		0
+#define CRYPTO_COMP_EXP_MAC_ENABLED		1
+
+#define CRYPTO_F9_DIRECTION			23
+#define CRYPTO_F9_DIRECTION_UPLINK		0
+#define CRYPTO_F9_DIRECTION_DOWNLINK		1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS		20 /* bit 22-20 */
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+				(0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_PIPE_KEY_AUTH		19
+#define CRYPTO_USE_HW_KEY_AUTH			18
+#define CRYPTO_FIRST				17
+#define CRYPTO_LAST				16
+
+#define CRYPTO_AUTH_POS				15 /* bit 15 .. 14*/
+#define CRYPTO_AUTH_POS_MASK			(0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE			0
+#define CRYPTO_AUTH_POS_AFTER			1
+
+#define CRYPTO_AUTH_SIZE			9 /* bits 13 .. 9*/
+#define CRYPTO_AUTH_SIZE_MASK			(0x1F << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1		0
+#define CRYPTO_AUTH_SIZE_SHA256		1
+#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES	0
+#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES	1
+#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES	2
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES	3
+#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES	4
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES	5
+#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES	6
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES	7
+#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES	8
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES	9
+#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES	10
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES	11
+#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES	12
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES	13
+#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES	14
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES	15
+
+
+#define CRYPTO_AUTH_MODE			6 /* bit 8 .. 6*/
+#define CRYPTO_AUTH_MODE_MASK			(0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH	0
+#define CRYPTO_AUTH_MODE_HMAC	1
+#define CRYPTO_AUTH_MODE_CCM	0
+#define CRYPTO_AUTH_MODE_CMAC	1
+
+#define CRYPTO_AUTH_KEY_SIZE			3  /* bit 5 .. 3*/
+#define CRYPTO_AUTH_KEY_SIZE_MASK		(0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128	0
+#define CRYPTO_AUTH_KEY_SZ_AES256	2
+
+#define CRYPTO_AUTH_ALG				0 /* bit 2 .. 0*/
+#define CRYPTO_AUTH_ALG_MASK			7
+#define CRYPTO_AUTH_ALG_NONE	0
+#define CRYPTO_AUTH_ALG_SHA	1
+#define CRYPTO_AUTH_ALG_AES	2
+#define CRYPTO_AUTH_ALG_KASUMI	3
+#define CRYPTO_AUTH_ALG_SNOW3G	4
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE			0 /* bit 19-0  */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK		0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE		17/* bit */
+#define CRYPTO_F8_KEYSTREAM_DISABLED	0
+#define CRYPTO_F8_KEYSTREAM_ENABLED	1
+
+#define CRYPTO_F8_DIRECTION			16 /* bit */
+#define CRYPTO_F8_DIRECTION_UPLINK	0
+#define CRYPTO_F8_DIRECTION_DOWNLINK	1
+
+
+#define CRYPTO_USE_PIPE_KEY_ENCR		15 /* bit */
+#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED	1
+#define CRYPTO_USE_KEY_REGISTERS		0
+
+
+#define CRYPTO_USE_HW_KEY_ENCR			14
+#define CRYPTO_USE_KEY_REG	0
+#define CRYPTO_USE_HW_KEY	1
+
+#define CRYPTO_LAST_CCM				13
+#define CRYPTO_LAST_CCM_XFR	1
+#define CRYPTO_INTERM_CCM_XFR	0
+
+
+#define CRYPTO_CNTR_ALG				11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK			(3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST	0
+
+#define CRYPTO_ENCODE				10
+
+#define CRYPTO_ENCR_MODE			6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK			(0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB	0
+#define CRYPTO_ENCR_MODE_CBC	1
+#define CRYPTO_ENCR_MODE_CTR	2
+#define CRYPTO_ENCR_MODE_XTS	3
+#define CRYPTO_ENCR_MODE_CCM	4
+
+#define CRYPTO_ENCR_KEY_SZ			3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK			(7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES		0
+#define CRYPTO_ENCR_KEY_SZ_3DES		1
+#define CRYPTO_ENCR_KEY_SZ_AES128	0
+#define CRYPTO_ENCR_KEY_SZ_AES256	2
+#define CRYPTO_ENCR_KEY_SZ_UEA1		0
+#define CRYPTO_ENCR_KEY_SZ_UEA2		1
+
+#define CRYPTO_ENCR_ALG				0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK			(7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE		0
+#define CRYPTO_ENCR_ALG_DES		1
+#define CRYPTO_ENCR_ALG_AES		2
+#define CRYPTO_ENCR_ALG_KASUMI		3
+#define CRYPTO_ENCR_ALG_SNOW_3G		5
+
+/* goproc reg */
+#define CRYPTO_GO				0
+#define CRYPTO_CLR_CNTXT			1
+#define CRYPTO_RESULTS_DUMP			2
+
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL			0
+#define CRYPTO_DES_SEL				3
+#define CRYPTO_ENCR_SNOW3G_SEL			4
+#define CRYPTO_ENCR_KASUMI_SEL			5
+#define CRYPTO_SHA_SEL				6
+#define CRYPTO_SHA512_SEL			7
+#define CRYPTO_AUTH_AES_SEL			8
+#define CRYPTO_AUTH_SNOW3G_SEL			9
+#define CRYPTO_AUTH_KASUMI_SEL			10
+#define CRYPTO_BAM_SEL				11
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 14070a7..e1755de 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -303,8 +303,8 @@
 					device->mmu.setstate_memory.gpuaddr +
 					KGSL_IOMMU_SETSTATE_NOP_OFFSET);
 
+	pt_val = kgsl_mmu_pt_get_base_addr(device->mmu.hwpagetable);
 	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
-		pt_val = kgsl_mmu_pt_get_base_addr(device->mmu.hwpagetable);
 		/*
 		 * We need to perform the following operations for all
 		 * IOMMU units
@@ -338,24 +338,6 @@
 				reg_pt_val,
 				device->mmu.setstate_memory.gpuaddr +
 				KGSL_IOMMU_SETSTATE_NOP_OFFSET);
-
-			/* set the asid */
-			*cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
-			*cmds++ = reg_map_desc[i]->gpuaddr +
-				(KGSL_IOMMU_CONTEXT_USER <<
-				KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_CONTEXTIDR;
-			*cmds++ = kgsl_mmu_get_hwpagetable_asid(&device->mmu);
-			*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
-			*cmds++ = 0x00000000;
-
-			/* Read back asid to ensure above write completes */
-			cmds += adreno_add_read_cmds(device, cmds,
-				reg_map_desc[i]->gpuaddr +
-				(KGSL_IOMMU_CONTEXT_USER <<
-				KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_CONTEXTIDR,
-				kgsl_mmu_get_hwpagetable_asid(&device->mmu),
-				device->mmu.setstate_memory.gpuaddr +
-				KGSL_IOMMU_SETSTATE_NOP_OFFSET);
 		}
 		/* invalidate all base pointers */
 		*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
@@ -367,15 +349,21 @@
 	}
 	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
 		/*
-		 * tlb flush based on asid, no need to flush entire tlb
+		 * flush the entire tlb (TLBIALL) for every IOMMU unit
 		 */
 		for (i = 0; i < num_iommu_units; i++) {
+			reg_pt_val = (pt_val &
+				(KGSL_IOMMU_TTBR0_PA_MASK <<
+				KGSL_IOMMU_TTBR0_PA_SHIFT)) +
+				kgsl_mmu_get_pt_lsb(&device->mmu, i,
+					KGSL_IOMMU_CONTEXT_USER);
+
 			*cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
 			*cmds++ = (reg_map_desc[i]->gpuaddr +
 				(KGSL_IOMMU_CONTEXT_USER <<
 				KGSL_IOMMU_CTX_SHIFT) +
-				KGSL_IOMMU_CTX_TLBIASID);
-			*cmds++ = kgsl_mmu_get_hwpagetable_asid(&device->mmu);
+				KGSL_IOMMU_CTX_TLBIALL);
+			*cmds++ = 1;
 
 			cmds += __adreno_add_idle_indirect_cmds(cmds,
 			device->mmu.setstate_memory.gpuaddr +
@@ -384,9 +372,8 @@
 			cmds += adreno_add_read_cmds(device, cmds,
 				reg_map_desc[i]->gpuaddr +
 				(KGSL_IOMMU_CONTEXT_USER <<
-				KGSL_IOMMU_CTX_SHIFT) +
-				KGSL_IOMMU_CONTEXTIDR,
-				kgsl_mmu_get_hwpagetable_asid(&device->mmu),
+				KGSL_IOMMU_CTX_SHIFT) + KGSL_IOMMU_TTBR0,
+				reg_pt_val,
 				device->mmu.setstate_memory.gpuaddr +
 				KGSL_IOMMU_SETSTATE_NOP_OFFSET);
 		}
@@ -948,6 +935,15 @@
 		kgsl_mmu_setstate(&device->mmu, adreno_context->pagetable,
 			KGSL_MEMSTORE_GLOBAL);
 
+	/* If iommu is used then we need to make sure that the iommu clocks
+	 * are on since there could be commands in pipeline that touch iommu */
+	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+		ret = kgsl_mmu_enable_clk(&device->mmu,
+			KGSL_IOMMU_CONTEXT_USER);
+		if (ret)
+			goto done;
+	}
+
 	/* Do not try the bad commands if recovery has failed bad commands
 	 * once already */
 	if (!try_bad_commands)
@@ -973,6 +969,18 @@
 				"Device start failed in recovery\n");
 				goto done;
 			}
+			if (context)
+				kgsl_mmu_setstate(&device->mmu,
+						adreno_context->pagetable,
+						KGSL_MEMSTORE_GLOBAL);
+
+			if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+				ret = kgsl_mmu_enable_clk(&device->mmu,
+						KGSL_IOMMU_CONTEXT_USER);
+				if (ret)
+					goto done;
+			}
+
 			ret = idle_ret;
 			KGSL_DRV_ERR(device,
 			"Bad context commands hung in recovery\n");
@@ -1008,6 +1016,9 @@
 		}
 	}
 done:
+	/* Turn off iommu clocks */
+	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
+		kgsl_mmu_disable_clk_on_ts(&device->mmu, 0, false);
 	return ret;
 }
 
@@ -1735,6 +1746,13 @@
 	} while (time_elapsed < msecs);
 
 hang_dump:
+	/*
+	 * Check if timestamp has retired here because we may have hit
+	 * recovery which can take some time and cause waiting threads
+	 * to timeout
+	 */
+	if (kgsl_check_timestamp(device, context, timestamp))
+		goto done;
 	status = -ETIMEDOUT;
 	KGSL_DRV_ERR(device,
 		     "Device hang detected while waiting for timestamp: "
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index d54ce6b..86a349ad 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -120,6 +120,8 @@
 			goto err;
 		}
 
+		continue;
+
 err:
 		if (!adreno_dump_and_recover(rb->device))
 				wait_time = jiffies + wait_timeout;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2a9f564..278be99 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -897,6 +897,9 @@
 {
 	struct rb_node *node = private->mem_rb.rb_node;
 
+	if (!kgsl_mmu_gpuaddr_in_range(gpuaddr))
+		return NULL;
+
 	while (node != NULL) {
 		struct kgsl_mem_entry *entry;
 
@@ -1112,6 +1115,19 @@
 			goto done;
 		}
 
+		/*
+		 * Put a reasonable upper limit on the number of IBs that can be
+		 * submitted
+		 */
+
+		if (param->numibs > 10000) {
+			KGSL_DRV_ERR(dev_priv->device,
+				"Too many IBs submitted. count: %d max 10000\n",
+				param->numibs);
+			result = -EINVAL;
+			goto done;
+		}
+
 		ibdesc = kzalloc(sizeof(struct kgsl_ibdesc) * param->numibs,
 					GFP_KERNEL);
 		if (!ibdesc) {
diff --git a/drivers/gpu/msm/kgsl_gpummu.c b/drivers/gpu/msm/kgsl_gpummu.c
index 998eaab..edccff1 100644
--- a/drivers/gpu/msm/kgsl_gpummu.c
+++ b/drivers/gpu/msm/kgsl_gpummu.c
@@ -732,7 +732,6 @@
 	.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
 	.mmu_enable_clk = NULL,
 	.mmu_disable_clk_on_ts = NULL,
-	.mmu_get_hwpagetable_asid = NULL,
 	.mmu_get_pt_lsb = NULL,
 	.mmu_get_reg_map_desc = NULL,
 };
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 25d0463..016771b 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -292,14 +292,6 @@
 	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
 	if (iommu_pt->domain)
 		iommu_domain_free(iommu_pt->domain);
-	if (iommu_pt->iommu) {
-		if ((KGSL_IOMMU_ASID_REUSE == iommu_pt->asid) &&
-			iommu_pt->iommu->asid_reuse)
-			iommu_pt->iommu->asid_reuse--;
-		if (!iommu_pt->iommu->asid_reuse ||
-			(KGSL_IOMMU_ASID_REUSE != iommu_pt->asid))
-			clear_bit(iommu_pt->asid, iommu_pt->iommu->asids);
-	}
 	kfree(iommu_pt);
 }
 
@@ -621,20 +613,15 @@
 				unsigned int context_id)
 {
 	if (mmu->flags & KGSL_FLAGS_STARTED) {
-		struct kgsl_iommu *iommu = mmu->priv;
-		struct kgsl_iommu_pt *iommu_pt = pagetable->priv;
 		/* page table not current, then setup mmu to use new
 		 *  specified page table
 		 */
 		if (mmu->hwpagetable != pagetable) {
 			unsigned int flags = 0;
 			mmu->hwpagetable = pagetable;
-			/* force tlb flush if asid is reused */
-			if (iommu->asid_reuse &&
-				(KGSL_IOMMU_ASID_REUSE == iommu_pt->asid))
-				flags |= KGSL_MMUFLAGS_TLBFLUSH;
 			flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
-							mmu->device->id);
+							mmu->device->id) |
+							KGSL_MMUFLAGS_TLBFLUSH;
 			kgsl_setstate(mmu, context_id,
 				KGSL_MMUFLAGS_PTUPDATE | flags);
 		}
@@ -657,14 +644,6 @@
 				sizeof(struct kgsl_iommu));
 		return -ENOMEM;
 	}
-	iommu->asids = kzalloc(BITS_TO_LONGS(KGSL_IOMMU_MAX_ASIDS) *
-				sizeof(unsigned long), GFP_KERNEL);
-	if (!iommu->asids) {
-		KGSL_CORE_ERR("kzalloc(%d) failed\n",
-				sizeof(struct kgsl_iommu));
-		status = -ENOMEM;
-		goto done;
-	}
 
 	mmu->priv = iommu;
 	status = kgsl_get_iommu_ctxt(mmu);
@@ -684,7 +663,6 @@
 			__func__);
 done:
 	if (status) {
-		kfree(iommu->asids);
 		kfree(iommu);
 		mmu->priv = NULL;
 	}
@@ -718,7 +696,6 @@
 			goto err;
 		}
 		iommu_pt = mmu->priv_bank_table->priv;
-		iommu_pt->asid = 1;
 	}
 	mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
 	/* Return error if the default pagetable doesn't exist */
@@ -740,14 +717,6 @@
 			goto err;
 		}
 	}
-	/*
-	 * The dafault pagetable always has asid 0 assigned by the iommu driver
-	 * and asid 1 is assigned to the private context.
-	 */
-	iommu_pt = mmu->defaultpagetable->priv;
-	iommu_pt->asid = 0;
-	set_bit(0, iommu->asids);
-	set_bit(1, iommu->asids);
 	return status;
 err:
 	for (i--; i >= 0; i--) {
@@ -818,12 +787,6 @@
 	 */
 	for (i = 0; i < iommu->unit_count; i++) {
 		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
-		/* Make sure that the ASID of the priv bank is set to 1.
-		 * When we a different pagetable for the priv bank then the
-		 * iommu driver sets the ASID to 0 instead of 1 */
-		KGSL_IOMMU_SET_IOMMU_REG(iommu->iommu_units[i].reg_map.hostptr,
-					KGSL_IOMMU_CONTEXT_PRIV,
-					CONTEXTIDR, 1);
 		for (j = 0; j < iommu_unit->dev_count; j++)
 			iommu_unit->dev[j].pt_lsb = KGSL_IOMMMU_PT_LSB(
 						KGSL_IOMMU_GET_IOMMU_REG(
@@ -831,10 +794,6 @@
 						iommu_unit->dev[j].ctx_id,
 						TTBR0));
 	}
-	iommu->asid = KGSL_IOMMU_GET_IOMMU_REG(
-				iommu->iommu_units[0].reg_map.hostptr,
-				KGSL_IOMMU_CONTEXT_USER,
-				CONTEXTIDR);
 
 	kgsl_iommu_disable_clk_on_ts(mmu, 0, false);
 	mmu->flags |= KGSL_FLAGS_STARTED;
@@ -955,7 +914,6 @@
 		kgsl_mmu_putpagetable(mmu->priv_bank_table);
 	if (mmu->defaultpagetable)
 		kgsl_mmu_putpagetable(mmu->defaultpagetable);
-	kfree(iommu->asids);
 	kfree(iommu);
 
 	return 0;
@@ -981,47 +939,6 @@
 }
 
 /*
- * kgsl_iommu_get_hwpagetable_asid - Returns asid(application space ID) for a
- * pagetable
- * @mmu - Pointer to mmu structure
- *
- * Allocates an asid to a IOMMU domain if it does not already have one. asid's
- * are unique identifiers for pagetable that can be used to selectively flush
- * tlb entries of the IOMMU unit.
- * Return - asid to be used with the IOMMU domain
- */
-static int kgsl_iommu_get_hwpagetable_asid(struct kgsl_mmu *mmu)
-{
-	struct kgsl_iommu *iommu = mmu->priv;
-	struct kgsl_iommu_pt *iommu_pt = mmu->hwpagetable->priv;
-
-	/*
-	 * If the iommu pagetable does not have any asid assigned and is not the
-	 * default pagetable then assign asid.
-	 */
-	if (!iommu_pt->asid && iommu_pt != mmu->defaultpagetable->priv) {
-		iommu_pt->asid = find_first_zero_bit(iommu->asids,
-							KGSL_IOMMU_MAX_ASIDS);
-		/* No free bits means reuse asid */
-		if (iommu_pt->asid >= KGSL_IOMMU_MAX_ASIDS) {
-			iommu_pt->asid = KGSL_IOMMU_ASID_REUSE;
-			iommu->asid_reuse++;
-		}
-		set_bit(iommu_pt->asid, iommu->asids);
-		/*
-		 * Store pointer to asids list so that during pagetable destroy
-		 * the asid assigned to this pagetable may be cleared
-		 */
-		iommu_pt->iommu = iommu;
-	}
-	/* Return the asid + the constant part of asid that never changes */
-	return (iommu_pt->asid & (KGSL_IOMMU_CONTEXTIDR_ASID_MASK <<
-				KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT)) +
-		(iommu->asid & ~(KGSL_IOMMU_CONTEXTIDR_ASID_MASK <<
-				KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT));
-}
-
-/*
  * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
  * of the primary context bank
  * @mmu - Pointer to mmu structure
@@ -1066,15 +983,6 @@
 			temp = KGSL_IOMMU_GET_IOMMU_REG(
 				iommu->iommu_units[i].reg_map.hostptr,
 				KGSL_IOMMU_CONTEXT_USER, TTBR0);
-			/* Set asid */
-			KGSL_IOMMU_SET_IOMMU_REG(
-				iommu->iommu_units[i].reg_map.hostptr,
-				KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR,
-				kgsl_iommu_get_hwpagetable_asid(mmu));
-			mb();
-			temp = KGSL_IOMMU_GET_IOMMU_REG(
-					iommu->iommu_units[i].reg_map.hostptr,
-					KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR);
 		}
 	}
 	/* Flush tlb */
@@ -1082,8 +990,8 @@
 		for (i = 0; i < iommu->unit_count; i++) {
 			KGSL_IOMMU_SET_IOMMU_REG(
 				iommu->iommu_units[i].reg_map.hostptr,
-				KGSL_IOMMU_CONTEXT_USER, CTX_TLBIASID,
-				kgsl_iommu_get_hwpagetable_asid(mmu));
+				KGSL_IOMMU_CONTEXT_USER, CTX_TLBIALL,
+				1);
 			mb();
 		}
 	}
@@ -1139,7 +1047,6 @@
 	.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
 	.mmu_enable_clk = kgsl_iommu_enable_clk,
 	.mmu_disable_clk_on_ts = kgsl_iommu_disable_clk_on_ts,
-	.mmu_get_hwpagetable_asid = kgsl_iommu_get_hwpagetable_asid,
 	.mmu_get_pt_lsb = kgsl_iommu_get_pt_lsb,
 	.mmu_get_reg_map_desc = kgsl_iommu_get_reg_map_desc,
 };
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 354a5cf..f14db93 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -23,15 +23,8 @@
 #define KGSL_IOMMU_TTBR0_PA_MASK		0x0003FFFF
 #define KGSL_IOMMU_TTBR0_PA_SHIFT		14
 #define KGSL_IOMMU_CTX_TLBIALL			0x800
-#define KGSL_IOMMU_CONTEXTIDR			0x8
-#define KGSL_IOMMU_CONTEXTIDR_ASID_MASK		0xFF
-#define KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT	0
-#define KGSL_IOMMU_CTX_TLBIASID			0x804
 #define KGSL_IOMMU_CTX_SHIFT			12
 
-#define KGSL_IOMMU_MAX_ASIDS			256
-#define KGSL_IOMMU_ASID_REUSE			2
-
 /*
  * Max number of iommu units that the gpu core can have
  * On APQ8064, KGSL can control a maximum of 2 IOMMU units.
@@ -106,10 +99,6 @@
  * @clk_event_queued: Indicates whether an event to disable clocks
  * is already queued or not
  * @device: Pointer to kgsl device
- * @asids: A bit structure indicating which id's are presently used
- * @asid: Contains the initial value of IOMMU_CONTEXTIDR when a domain
- * is first attached
- * asid_reuse: Holds the number of times the reuse asid is reused
  */
 struct kgsl_iommu {
 	struct kgsl_iommu_unit iommu_units[KGSL_IOMMU_MAX_UNITS];
@@ -117,21 +106,16 @@
 	unsigned int iommu_last_cmd_ts;
 	bool clk_event_queued;
 	struct kgsl_device *device;
-	unsigned long *asids;
-	unsigned int asid;
-	unsigned int asid_reuse;
 };
 
 /*
  * struct kgsl_iommu_pt - Iommu pagetable structure private to kgsl driver
  * @domain: Pointer to the iommu domain that contains the iommu pagetable
  * @iommu: Pointer to iommu structure
- * @asid: The asid assigned to this domain
  */
 struct kgsl_iommu_pt {
 	struct iommu_domain *domain;
 	struct kgsl_iommu *iommu;
-	unsigned int asid;
 };
 
 #endif
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index d06ce45..bc6ec8e 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -136,7 +136,6 @@
 		(struct kgsl_mmu *mmu, uint32_t ts, bool ts_valid);
 	int (*mmu_enable_clk)
 		(struct kgsl_mmu *mmu, int ctx_id);
-	int (*mmu_get_hwpagetable_asid)(struct kgsl_mmu *mmu);
 	int (*mmu_get_pt_lsb)(struct kgsl_mmu *mmu,
 				unsigned int unit_id,
 				enum kgsl_iommu_context_id ctx_id);
@@ -278,14 +277,6 @@
 		return 0;
 }
 
-static inline int kgsl_mmu_get_hwpagetable_asid(struct kgsl_mmu *mmu)
-{
-	if (mmu->mmu_ops && mmu->mmu_ops->mmu_get_hwpagetable_asid)
-		return mmu->mmu_ops->mmu_get_hwpagetable_asid(mmu);
-	else
-		return 0;
-}
-
 static inline int kgsl_mmu_enable_clk(struct kgsl_mmu *mmu,
 					int ctx_id)
 {
@@ -302,4 +293,10 @@
 		mmu->mmu_ops->mmu_disable_clk_on_ts(mmu, ts, ts_valid);
 }
 
+static inline int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr)
+{
+	return ((gpuaddr >= KGSL_PAGETABLE_BASE) &&
+		(gpuaddr < (KGSL_PAGETABLE_BASE + kgsl_mmu_get_ptsize())));
+}
+
 #endif /* __KGSL_MMU_H */
diff --git a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
index 7034cb0..070222e 100644
--- a/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
+++ b/drivers/gud/mobicore_driver/platforms/msm8960_surf_std/platform.h
@@ -21,7 +21,7 @@
 #define _MC_DRV_PLATFORM_H_
 
 /** MobiCore Interrupt for Qualcomm */
-#define MC_INTR_SSIQ						218
+#define MC_INTR_SSIQ						280
 
 /** Use SMC for fastcalls */
 #define MC_SMC_FASTCALL
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index b050db2..53fec5b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -868,6 +868,16 @@
 	  Provides interface for measuring the current on specific power rails
 	  through the channels on ADC1158 ADC
 
+config SENSORS_QPNP_ADC_VOLTAGE
+	tristate "Support for Qualcomm QPNP Voltage ADC"
+	depends on SPMI
+	help
+	  This is the VADC arbiter driver for Qualcomm QPNP ADC Chip.
+
+	  The driver supports reading the HKADC and XOADC through the ADC AMUX
+	  arbiter and includes support for the conversion sequencer. The driver
+	  also supports reading voltages on the AMUX channels that use external
+	  pull-ups.
+
 config SENSORS_PC87360
 	tristate "National Semiconductor PC87360 family"
 	depends on !PPC
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 228c4e9..2ff9454 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -130,6 +130,7 @@
 obj-$(CONFIG_SENSORS_MSM_ADC)	+= msm_adc.o m_adcproc.o
 obj-$(CONFIG_SENSORS_PM8XXX_ADC)	+= pm8xxx-adc.o pm8xxx-adc-scale.o
 obj-$(CONFIG_SENSORS_EPM_ADC)	+= epm_adc.o
+obj-$(CONFIG_SENSORS_QPNP_ADC_VOLTAGE)	+= qpnp-adc-voltage.o qpnp-adc-common.o
 
 obj-$(CONFIG_PMBUS)		+= pmbus/
 
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
new file mode 100644
index 0000000..c8fe798
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -0,0 +1,258 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/platform_device.h>
+
+/* Min ADC code represents 0V */
+#define QPNP_VADC_MIN_ADC_CODE			0x6000
+/* Max ADC code represents full-scale range of 1.8V */
+#define QPNP_VADC_MAX_ADC_CODE			0xA800
+
+int32_t qpnp_adc_scale_default(int32_t adc_code,
+		const struct qpnp_adc_properties *adc_properties,
+		const struct qpnp_vadc_chan_properties *chan_properties,
+		struct qpnp_vadc_result *adc_chan_result)
+{
+	bool negative_rawfromoffset = 0, negative_offset = 0;
+	int64_t scale_voltage = 0;
+
+	if (!chan_properties || !chan_properties->offset_gain_numerator ||
+		!chan_properties->offset_gain_denominator || !adc_properties
+		|| !adc_chan_result)
+		return -EINVAL;
+
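+	/*
+	 * Convert the raw code to a voltage using the absolute calibration
+	 * graph: subtract the measured ground code, scale by dx/dy (reference
+	 * voltage delta over the corresponding code delta), then undo the
+	 * channel pre-divider using the offset_gain ratio.
+	 */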
+	scale_voltage = (adc_code -
+		chan_properties->adc_graph[CALIB_ABSOLUTE].adc_gnd)
+		* chan_properties->adc_graph[CALIB_ABSOLUTE].dx;
+	if (scale_voltage < 0) {
+		negative_offset = 1;
+		scale_voltage = -scale_voltage;
+	}
+	do_div(scale_voltage,
+		chan_properties->adc_graph[CALIB_ABSOLUTE].dy);
+	if (negative_offset)
+		scale_voltage = -scale_voltage;
+	scale_voltage += chan_properties->adc_graph[CALIB_ABSOLUTE].dx;
+
+	if (scale_voltage < 0) {
+		if (adc_properties->bipolar) {
+			scale_voltage = -scale_voltage;
+			negative_rawfromoffset = 1;
+		} else {
+			scale_voltage = 0;
+		}
+	}
+
+	adc_chan_result->measurement = scale_voltage *
+				chan_properties->offset_gain_denominator;
+
+	/* do_div only performs positive integer division! */
+	do_div(adc_chan_result->measurement,
+				chan_properties->offset_gain_numerator);
+
+	if (negative_rawfromoffset)
+		adc_chan_result->measurement = -adc_chan_result->measurement;
+
+	/*
+	 * Note: adc_chan_result->measurement is in the unit of
+	 * adc_properties.adc_reference. For generic channel processing,
+	 * channel measurement is a scale/ratio relative to the adc
+	 * reference input
+	 */
+	adc_chan_result->physical = adc_chan_result->measurement;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qpnp_adc_scale_default);
+
+int32_t qpnp_vadc_check_result(int32_t *data)
+{
+	if (*data < QPNP_VADC_MIN_ADC_CODE)
+		*data = QPNP_VADC_MIN_ADC_CODE;
+	else if (*data > QPNP_VADC_MAX_ADC_CODE)
+		*data = QPNP_VADC_MAX_ADC_CODE;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qpnp_vadc_check_result);
+
+int32_t qpnp_adc_get_devicetree_data(struct spmi_device *spmi,
+			struct qpnp_adc_drv *adc_qpnp)
+{
+	struct device_node *node = spmi->dev.of_node;
+	struct resource *res;
+	struct device_node *child;
+	struct qpnp_vadc_amux *adc_channel_list;
+	struct qpnp_adc_properties *adc_prop;
+	struct qpnp_vadc_amux_properties *amux_prop;
+	int count_adc_channel_list = 0, decimation, rc = 0, i = 0;
+
+	if (!node)
+		return -EINVAL;
+
+	for_each_child_of_node(node, child)
+		count_adc_channel_list++;
+
+	if (!count_adc_channel_list) {
+		pr_err("No channel listing\n");
+		return -EINVAL;
+	}
+
+	adc_qpnp->spmi = spmi;
+
+	adc_prop = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_properties),
+					GFP_KERNEL);
+	if (!adc_prop) {
+		dev_err(&spmi->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+	adc_channel_list = devm_kzalloc(&spmi->dev,
+		(sizeof(struct qpnp_vadc_amux) * count_adc_channel_list),
+				GFP_KERNEL);
+	if (!adc_channel_list) {
+		dev_err(&spmi->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	amux_prop = devm_kzalloc(&spmi->dev,
+		sizeof(struct qpnp_vadc_amux_properties) +
+		sizeof(struct qpnp_vadc_chan_properties), GFP_KERNEL);
+	if (!amux_prop) {
+		dev_err(&spmi->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
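+	/*
+	 * Walk the child nodes and fill one qpnp_vadc_amux entry per channel
+	 * with the properties described in the qpnp-adc-voltage binding.
+	 */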
+	for_each_child_of_node(node, child) {
+		int channel_num, scaling, post_scaling, hw_settle_time;
+		int fast_avg_setup, calib_type, rc;
+		const char *calibration_param, *channel_name;
+
+		channel_name = of_get_property(child,
+				"label", NULL) ? : child->name;
+		if (!channel_name) {
+			pr_err("Invalid channel name\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child, "qcom,channel-num",
+								&channel_num);
+		if (rc) {
+			pr_err("Invalid channel num\n");
+			return -EINVAL;
+		}
+		rc = of_property_read_u32(child, "qcom,decimation",
+								&decimation);
+		if (rc) {
+			pr_err("Invalid channel decimation property\n");
+			return -EINVAL;
+		}
+		rc = of_property_read_u32(child,
+				"qcom,pre-div-channel-scaling", &scaling);
+		if (rc) {
+			pr_err("Invalid channel scaling property\n");
+			return -EINVAL;
+		}
+		rc = of_property_read_u32(child,
+				"qcom,scale-function", &post_scaling);
+		if (rc) {
+			pr_err("Invalid channel post scaling property\n");
+			return -EINVAL;
+		}
+		rc = of_property_read_u32(child,
+				"qcom,hw-settle-time", &hw_settle_time);
+		if (rc) {
+			pr_err("Invalid channel hw settle time property\n");
+			return -EINVAL;
+		}
+		rc = of_property_read_u32(child,
+				"qcom,fast-avg-setup", &fast_avg_setup);
+		if (rc) {
+			pr_err("Invalid channel fast average setup\n");
+			return -EINVAL;
+		}
+		calibration_param = of_get_property(child,
+				"qcom,calibration-type", NULL);
+		if (!strncmp(calibration_param, "absolute", 8))
+			calib_type = CALIB_ABSOLUTE;
+		else if (!strncmp(calibration_param, "ratiometric", 11))
+			calib_type = CALIB_RATIOMETRIC;
+		else {
+			pr_err("%s: Invalid calibration property\n", __func__);
+			return -EINVAL;
+		}
+		/* Individual channel properties */
+		adc_channel_list[i].name = (char *)channel_name;
+		adc_channel_list[i].channel_num = channel_num;
+		adc_channel_list[i].chan_path_prescaling = scaling;
+		adc_channel_list[i].adc_decimation = decimation;
+		adc_channel_list[i].adc_scale_fn = post_scaling;
+		adc_channel_list[i].hw_settle_time = hw_settle_time;
+		adc_channel_list[i].fast_avg_setup = fast_avg_setup;
+		i++;
+	}
+	adc_qpnp->adc_channels = adc_channel_list;
+	adc_qpnp->amux_prop = amux_prop;
+
+	/* Get the ADC VDD reference voltage and ADC bit resolution */
+	rc = of_property_read_u32(node, "qcom,adc-vdd-reference",
+			&adc_prop->adc_vdd_reference);
+	if (rc) {
+		pr_err("Invalid adc vdd reference property\n");
+		return -EINVAL;
+	}
+	rc = of_property_read_u32(node, "qcom,adc-bit-resolution",
+			&adc_prop->bitresolution);
+	if (rc) {
+		pr_err("Invalid adc bit resolution property\n");
+		return -EINVAL;
+	}
+	adc_qpnp->adc_prop = adc_prop;
+
+	/* Get the peripheral address */
+	res = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+	if (!res) {
+		pr_err("No base address definition\n");
+		return -EINVAL;
+	}
+
+	adc_qpnp->slave = spmi->sid;
+	adc_qpnp->offset = res->start;
+
+	/* Register the ADC peripheral interrupt */
+	adc_qpnp->adc_irq = spmi_get_irq(spmi, 0, 0);
+	if (adc_qpnp->adc_irq < 0) {
+		pr_err("Invalid irq\n");
+		return -ENXIO;
+	}
+
+	mutex_init(&adc_qpnp->adc_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_get_devicetree_data);
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
new file mode 100644
index 0000000..8b2cb97
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -0,0 +1,784 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/platform_device.h>
+
+/* QPNP VADC register definition */
+#define QPNP_VADC_STATUS1					0x8
+#define QPNP_VADC_STATUS1_OP_MODE				4
+#define QPNP_VADC_STATUS1_MEAS_INTERVAL_EN_STS			BIT(2)
+#define QPNP_VADC_STATUS1_REQ_STS				BIT(1)
+#define QPNP_VADC_STATUS1_EOC					BIT(0)
+#define QPNP_VADC_STATUS2					0x9
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE				6
+#define QPNP_VADC_STATUS2_FIFO_NOT_EMPTY_FLAG			BIT(1)
+#define QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS			BIT(0)
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT			4
+#define QPNP_VADC_CONV_TIMEOUT_ERR				2
+
+#define QPNP_VADC_INT_SET_TYPE					0x11
+#define QPNP_VADC_INT_POLARITY_HIGH				0x12
+#define QPNP_VADC_INT_POLARITY_LOW				0x13
+#define QPNP_VADC_INT_LATCHED_CLR				0x14
+#define QPNP_VADC_INT_EN_SET					0x15
+#define QPNP_VADC_INT_CLR					0x16
+#define QPNP_VADC_INT_LOW_THR_BIT				BIT(4)
+#define QPNP_VADC_INT_HIGH_THR_BIT				BIT(3)
+#define QPNP_VADC_INT_CONV_SEQ_TIMEOUT_BIT			BIT(2)
+#define QPNP_VADC_INT_FIFO_NOT_EMPTY_BIT			BIT(1)
+#define QPNP_VADC_INT_EOC_BIT					BIT(0)
+#define QPNP_VADC_INT_CLR_MASK					0x1f
+#define QPNP_VADC_MODE_CTL					0x40
+#define QPNP_VADC_OP_MODE_SHIFT					4
+#define QPNP_VADC_VREF_XO_THM_FORCE				BIT(2)
+#define QPNP_VADC_AMUX_TRIM_EN					BIT(1)
+#define QPNP_VADC_ADC_TRIM_EN					BIT(0)
+#define QPNP_VADC_EN_CTL1					0x46
+#define QPNP_VADC_ADC_EN					BIT(7)
+#define QPNP_VADC_ADC_CH_SEL_CTL					0x48
+#define QPNP_VADC_ADC_DIG_PARAM					0x50
+#define QPNP_VADC_ADC_DIG_DEC_RATIO_SEL_SHIFT			3
+#define QPNP_VADC_HW_SETTLE_DELAY				0x51
+#define QPNP_VADC_CONV_REQ					0x52
+#define QPNP_VADC_CONV_REQ_SET					BIT(7)
+#define QPNP_VADC_CONV_SEQ_CTL					0x54
+#define QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT				4
+#define QPNP_VADC_CONV_SEQ_TRIG_CTL				0x55
+#define QPNP_VADC_CONV_SEQ_FALLING_EDGE				0x0
+#define QPNP_VADC_CONV_SEQ_RISING_EDGE				0x1
+#define QPNP_VADC_CONV_SEQ_EDGE_SHIFT				7
+#define QPNP_VADC_FAST_AVG_CTL					0x5a
+
+#define QPNP_VADC_M0_LOW_THR_LSB					0x5c
+#define QPNP_VADC_M0_LOW_THR_MSB					0x5d
+#define QPNP_VADC_M0_HIGH_THR_LSB				0x5e
+#define QPNP_VADC_M0_HIGH_THR_MSB				0x5f
+#define QPNP_VADC_M1_LOW_THR_LSB					0x69
+#define QPNP_VADC_M1_LOW_THR_MSB					0x6a
+#define QPNP_VADC_M1_HIGH_THR_LSB				0x6b
+#define QPNP_VADC_M1_HIGH_THR_MSB				0x6c
+
+#define QPNP_VADC_DATA0						0x60
+#define QPNP_VADC_DATA1						0x61
+#define QPNP_VADC_CONV_TIMEOUT_ERR				2
+#define QPNP_VADC_CONV_TIME_MIN					2000
+#define QPNP_VADC_CONV_TIME_MAX					2100
+
+#define QPNP_ADC_HWMON_NAME_LENGTH				16
+
+struct qpnp_vadc_drv {
+	struct qpnp_adc_drv		*adc;
+	struct dentry			*dent;
+	struct device			*vadc_hwmon;
+	bool				vadc_init_calib;
+	struct sensor_device_attribute		sens_attr[0];
+};
+
+struct qpnp_vadc_drv *qpnp_vadc;
+
+static struct qpnp_vadc_scale_fn vadc_scale_fn[] = {
+	[SCALE_DEFAULT] = {qpnp_adc_scale_default},
+};
+
+static int32_t qpnp_vadc_read_reg(int16_t reg, u8 *data)
+{
+	struct qpnp_vadc_drv *vadc = qpnp_vadc;
+	int rc;
+
+	rc = spmi_ext_register_readl(vadc->adc->spmi->ctrl, vadc->adc->slave,
+		reg, data, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc read reg %d failed with %d\n", reg, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_vadc_write_reg(int16_t reg, u8 data)
+{
+	struct qpnp_vadc_drv *vadc = qpnp_vadc;
+	int rc;
+	u8 *buf;
+
+	buf = &data;
+
+	rc = spmi_ext_register_writel(vadc->adc->spmi->ctrl, vadc->adc->slave,
+		reg, buf, 1);
+	if (rc < 0) {
+		pr_err("qpnp adc write reg %d failed with %d\n", reg, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_vadc_configure_interrupt(void)
+{
+	int rc = 0;
+	u8 data = 0;
+
+	/* Configure interrupt as an Edge trigger */
+	rc = qpnp_vadc_write_reg(QPNP_VADC_INT_SET_TYPE,
+					QPNP_VADC_INT_CLR_MASK);
+	if (rc < 0) {
+		pr_err("%s Interrupt configure failed\n", __func__);
+		return rc;
+	}
+
+	/* Configure interrupt for rising edge trigger */
+	rc = qpnp_vadc_write_reg(QPNP_VADC_INT_POLARITY_HIGH,
+					QPNP_VADC_INT_CLR_MASK);
+	if (rc < 0) {
+		pr_err("%s Rising edge trigger configure failed\n", __func__);
+		return rc;
+	}
+
+	/* Disable low level interrupt triggering */
+	data = QPNP_VADC_INT_CLR_MASK;
+	rc = qpnp_vadc_write_reg(QPNP_VADC_INT_POLARITY_LOW,
+					(~data & QPNP_VADC_INT_CLR_MASK));
+	if (rc < 0) {
+		pr_err("%s Setting level low to disable failed\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_vadc_enable(bool state)
+{
+	int rc = 0;
+	u8 data = 0;
+
+	data = QPNP_VADC_ADC_EN;
+	if (state) {
+		rc = qpnp_vadc_write_reg(QPNP_VADC_EN_CTL1,
+					data);
+		if (rc < 0) {
+			pr_err("VADC enable failed\n");
+			return rc;
+		}
+	} else {
+		rc = qpnp_vadc_write_reg(QPNP_VADC_EN_CTL1,
+					(~data & QPNP_VADC_ADC_EN));
+		if (rc < 0) {
+			pr_err("VADC disable failed\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+int32_t qpnp_vadc_configure(
+			struct qpnp_vadc_amux_properties *chan_prop)
+{
+	u8 decimation = 0, conv_sequence = 0, conv_sequence_trig = 0;
+	int rc = 0;
+
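+	/*
+	 * Program the conversion: enable the EOC interrupt, select the
+	 * operating mode and AMUX channel, set the decimation ratio and HW
+	 * settle time, configure fast averaging or the conversion sequencer,
+	 * and finally set CONV_REQ to start the measurement.
+	 */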
+	rc = qpnp_vadc_write_reg(QPNP_VADC_INT_EN_SET,
+				QPNP_VADC_INT_EOC_BIT);
+	if (rc < 0) {
+		pr_err("qpnp adc configure error for interrupt setup\n");
+		return rc;
+	}
+
+	rc = qpnp_vadc_write_reg(QPNP_VADC_MODE_CTL, chan_prop->mode_sel);
+	if (rc < 0) {
+		pr_err("qpnp adc configure error for mode selection\n");
+		return rc;
+	}
+
+	rc = qpnp_vadc_write_reg(QPNP_VADC_ADC_CH_SEL_CTL,
+						chan_prop->amux_channel);
+	if (rc < 0) {
+		pr_err("qpnp adc configure error for channel selection\n");
+		return rc;
+	}
+
+	decimation |= chan_prop->decimation <<
+				QPNP_VADC_ADC_DIG_DEC_RATIO_SEL_SHIFT;
+	rc = qpnp_vadc_write_reg(QPNP_VADC_ADC_DIG_PARAM, decimation);
+	if (rc < 0) {
+		pr_err("qpnp adc configure error for digital parameter setup\n");
+		return rc;
+	}
+
+	rc = qpnp_vadc_write_reg(QPNP_VADC_HW_SETTLE_DELAY,
+						chan_prop->hw_settle_time);
+	if (rc < 0) {
+		pr_err("qpnp adc configure error for hw settling time setup\n");
+		return rc;
+	}
+
+	if (chan_prop->mode_sel == (ADC_OP_NORMAL_MODE <<
+					QPNP_VADC_OP_MODE_SHIFT)) {
+		rc = qpnp_vadc_write_reg(QPNP_VADC_FAST_AVG_CTL,
+						chan_prop->fast_avg_setup);
+		if (rc < 0) {
+			pr_err("qpnp adc fast averaging configure error\n");
+			return rc;
+		}
+	} else if (chan_prop->mode_sel == (ADC_OP_CONVERSION_SEQUENCER <<
+					QPNP_VADC_OP_MODE_SHIFT)) {
+		conv_sequence = ((ADC_SEQ_HOLD_100US <<
+				QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT) |
+				ADC_CONV_SEQ_TIMEOUT_5MS);
+		rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_SEQ_CTL,
+							conv_sequence);
+		if (rc < 0) {
+			pr_err("qpnp adc conversion sequence error\n");
+			return rc;
+		}
+
+		conv_sequence_trig = ((QPNP_VADC_CONV_SEQ_RISING_EDGE <<
+				QPNP_VADC_CONV_SEQ_EDGE_SHIFT) |
+				chan_prop->trigger_channel);
+		rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_SEQ_TRIG_CTL,
+							conv_sequence_trig);
+		if (rc < 0) {
+			pr_err("qpnp adc conversion trigger error\n");
+			return rc;
+		}
+	}
+
+	rc = qpnp_vadc_write_reg(QPNP_VADC_CONV_REQ, QPNP_VADC_CONV_REQ_SET);
+	if (rc < 0) {
+		pr_err("qpnp adc request conversion failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qpnp_vadc_configure);
+
+static int32_t qpnp_vadc_read_conversion_result(int32_t *data)
+{
+	uint8_t rslt_lsb, rslt_msb;
+	int rc = 0;
+
+	rc = qpnp_vadc_read_reg(QPNP_VADC_DATA0, &rslt_lsb);
+	if (rc < 0) {
+		pr_err("qpnp adc result read failed for data0 with %d\n", rc);
+		return rc;
+	}
+
+	rc = qpnp_vadc_read_reg(QPNP_VADC_DATA1, &rslt_msb);
+	if (rc < 0) {
+		pr_err("qpnp adc result read failed for data1 with %d\n", rc);
+		return rc;
+	}
+
+	*data = (rslt_msb << 8) | rslt_lsb;
+
+	rc = qpnp_vadc_check_result(data);
+	if (rc < 0) {
+		pr_err("VADC data check failed\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int32_t qpnp_vadc_read_status(int mode_sel)
+{
+	u8 status1, status2, status2_conv_seq_state;
+	u8 status_err = QPNP_VADC_CONV_TIMEOUT_ERR;
+	int rc;
+
+	switch (mode_sel) {
+	case (ADC_OP_CONVERSION_SEQUENCER << QPNP_VADC_OP_MODE_SHIFT):
+		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+		if (rc) {
+			pr_err("qpnp_vadc read mask interrupt failed\n");
+			return rc;
+		}
+
+		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS2, &status2);
+		if (rc) {
+			pr_err("qpnp_vadc read mask interrupt failed\n");
+			return rc;
+		}
+
+		if (!(status2 & ~QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS) &&
+			(status1 & (~QPNP_VADC_STATUS1_REQ_STS |
+						QPNP_VADC_STATUS1_EOC))) {
+			rc = status_err;
+			return rc;
+		}
+
+		status2_conv_seq_state = status2 >>
+					QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT;
+		if (status2_conv_seq_state != ADC_CONV_SEQ_IDLE) {
+			pr_err("qpnp vadc seq error with status %d\n",
+						status2);
+			rc = -EINVAL;
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static void qpnp_vadc_work(struct work_struct *work)
+{
+	struct qpnp_vadc_drv *vadc = qpnp_vadc;
+	int rc;
+
+	rc = qpnp_vadc_write_reg(QPNP_VADC_INT_CLR, QPNP_VADC_INT_EOC_BIT);
+	if (rc)
+		pr_err("qpnp_vadc clear mask interrupt failed with %d\n", rc);
+
+	complete(&vadc->adc->adc_rslt_completion);
+
+	return;
+}
+DECLARE_WORK(trigger_completion_work, qpnp_vadc_work);
+
+static irqreturn_t qpnp_vadc_isr(int irq, void *dev_id)
+{
+	schedule_work(&trigger_completion_work);
+
+	return IRQ_HANDLED;
+}
+
+static uint32_t qpnp_vadc_calib_device(void)
+{
+	struct qpnp_vadc_drv *vadc = qpnp_vadc;
+	struct qpnp_vadc_amux_properties conv;
+	int rc, calib_read_1, calib_read_2;
+	u8 status1 = 0;
+
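+	/*
+	 * Absolute calibration: measure the internal 1.25V and 625mV
+	 * references to derive the uV-per-code slope (dx/dy) used for
+	 * absolute scaling.
+	 */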
+	conv.amux_channel = REF_125V;
+	conv.decimation = DECIMATION_TYPE2;
+	conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+
+	rc = qpnp_vadc_configure(&conv);
+	if (rc) {
+		pr_err("qpnp_vadc configure failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+					QPNP_VADC_STATUS1_EOC)) {
+		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+		if (rc < 0)
+			return rc;
+		usleep_range(QPNP_VADC_CONV_TIME_MIN,
+					QPNP_VADC_CONV_TIME_MAX);
+	}
+
+	rc = qpnp_vadc_read_conversion_result(&calib_read_1);
+	if (rc) {
+		pr_err("qpnp adc read adc failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	conv.amux_channel = REF_625MV;
+	conv.decimation = DECIMATION_TYPE2;
+	conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+	rc = qpnp_vadc_configure(&conv);
+	if (rc) {
+		pr_err("qpnp adc configure failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+					QPNP_VADC_STATUS1_EOC)) {
+		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+		if (rc < 0)
+			return rc;
+		usleep_range(QPNP_VADC_CONV_TIME_MIN,
+					QPNP_VADC_CONV_TIME_MAX);
+	}
+
+	rc = qpnp_vadc_read_conversion_result(&calib_read_2);
+	if (rc) {
+		pr_err("qpnp adc read adc failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dy =
+					(calib_read_1 - calib_read_2);
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dx
+						= QPNP_ADC_625_UV;
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_vref =
+					calib_read_1;
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_gnd =
+					calib_read_2;
+	/* Ratiometric Calibration */
+	conv.amux_channel = VDD_VADC;
+	conv.decimation = DECIMATION_TYPE2;
+	conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+	rc = qpnp_vadc_configure(&conv);
+	if (rc) {
+		pr_err("qpnp adc configure failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+					QPNP_VADC_STATUS1_EOC)) {
+		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+		if (rc < 0)
+			return rc;
+		usleep_range(QPNP_VADC_CONV_TIME_MIN,
+					QPNP_VADC_CONV_TIME_MAX);
+	}
+
+	rc = qpnp_vadc_read_conversion_result(&calib_read_1);
+	if (rc) {
+		pr_err("qpnp adc read adc failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	conv.amux_channel = VDD_VADC;
+	conv.decimation = DECIMATION_TYPE2;
+	conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+	conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+	conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+	rc = qpnp_vadc_configure(&conv);
+	if (rc) {
+		pr_err("qpnp adc configure failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	while (status1 != (~QPNP_VADC_STATUS1_REQ_STS |
+					QPNP_VADC_STATUS1_EOC)) {
+		rc = qpnp_vadc_read_reg(QPNP_VADC_STATUS1, &status1);
+		if (rc < 0)
+			return rc;
+		usleep_range(QPNP_VADC_CONV_TIME_MIN,
+					QPNP_VADC_CONV_TIME_MAX);
+	}
+
+	rc = qpnp_vadc_read_conversion_result(&calib_read_2);
+	if (rc) {
+		pr_err("qpnp adc read adc failed with %d\n", rc);
+		goto calib_fail;
+	}
+
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy =
+					(calib_read_1 - calib_read_2);
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx =
+					vadc->adc->adc_prop->adc_vdd_reference;
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_vref =
+					calib_read_1;
+	vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd =
+					calib_read_2;
+
+calib_fail:
+	return rc;
+}
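
For a feel of the numbers the graph above ends up holding, here is a purely hypothetical example (15-bit ADC, 1.8 V reference, so roughly 55 uV per code — the readings are made up for illustration):

	/*
	 * Hypothetical calibration readings, for illustration only:
	 *   code at the 1.25 V reference : 22756
	 *   code at the 625 mV reference : 11378
	 * The absolute graph then becomes
	 *   dy      = 22756 - 11378 = 11378 codes
	 *   dx      = QPNP_ADC_625_UV = 625000 uV
	 *   adc_gnd = 11378
	 * i.e. about 625000 / 11378 ~= 55 uV per code, which the scale
	 * functions use for later conversion results.
	 */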
+
+int32_t qpnp_vadc_conv_seq_request(enum qpnp_vadc_trigger trigger_channel,
+					enum qpnp_vadc_channels channel,
+					struct qpnp_vadc_result *result)
+{
+	struct qpnp_vadc_drv *vadc = qpnp_vadc;
+	int rc, scale_type, amux_prescaling;
+
+	if (!vadc->vadc_init_calib) {
+		rc = qpnp_vadc_calib_device();
+		if (rc) {
+			pr_err("Calibration failed\n");
+			return rc;
+		} else
+			vadc->vadc_init_calib = true;
+	}
+
+	mutex_lock(&vadc->adc->adc_lock);
+
+	rc = qpnp_vadc_enable(true);
+	if (rc)
+		goto fail_unlock;
+
+	vadc->adc->amux_prop->amux_channel = channel;
+	vadc->adc->amux_prop->decimation =
+			vadc->adc->adc_channels[channel].adc_decimation;
+	vadc->adc->amux_prop->hw_settle_time =
+			vadc->adc->adc_channels[channel].hw_settle_time;
+	vadc->adc->amux_prop->fast_avg_setup =
+			vadc->adc->adc_channels[channel].fast_avg_setup;
+
+	if (trigger_channel < ADC_SEQ_NONE)
+		vadc->adc->amux_prop->mode_sel = (ADC_OP_CONVERSION_SEQUENCER
+						<< QPNP_VADC_OP_MODE_SHIFT);
+	else if (trigger_channel == ADC_SEQ_NONE)
+		vadc->adc->amux_prop->mode_sel = (ADC_OP_NORMAL_MODE
+						<< QPNP_VADC_OP_MODE_SHIFT);
+	else {
+		pr_err("Invalid trigger channel:%d\n", trigger_channel);
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	vadc->adc->amux_prop->trigger_channel = trigger_channel;
+
+	rc = qpnp_vadc_configure(vadc->adc->amux_prop);
+	if (rc) {
+		pr_info("qpnp vadc configure failed with %d\n", rc);
+		goto fail;
+	}
+
+	wait_for_completion(&vadc->adc->adc_rslt_completion);
+
+	if (trigger_channel < ADC_SEQ_NONE) {
+		rc = qpnp_vadc_read_status(vadc->adc->amux_prop->mode_sel);
+		if (rc)
+			pr_info("Conversion sequence timed out - %d\n", rc);
+	}
+
+	rc = qpnp_vadc_read_conversion_result(&result->adc_code);
+	if (rc) {
+		pr_info("qpnp vadc read adc code failed with %d\n", rc);
+		goto fail;
+	}
+
+	amux_prescaling = vadc->adc->adc_channels[channel].chan_path_prescaling;
+
+	vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+		qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+	vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+		 qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+
+	scale_type = vadc->adc->adc_channels[channel].adc_scale_fn;
+	if (scale_type >= SCALE_NONE) {
+		rc = -EBADF;
+		goto fail;
+	}
+
+	vadc_scale_fn[scale_type].chan(result->adc_code,
+		vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
+
+fail:
+	rc = qpnp_vadc_enable(false);
+	if (rc)
+		pr_err("Disable ADC failed during configuration\n");
+
+fail_unlock:
+	mutex_unlock(&vadc->adc->adc_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_conv_seq_request);
+
+int32_t qpnp_vadc_read(enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result)
+{
+	return qpnp_vadc_conv_seq_request(ADC_SEQ_NONE,
+				channel, result);
+}
+EXPORT_SYMBOL_GPL(qpnp_vadc_read);
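
qpnp_vadc_read() is the one-shot entry point most clients are expected to use. A minimal consumer sketch, assuming the usual <linux/qpnp/qpnp-adc.h> declarations; EXAMPLE_CHANNEL is a placeholder, not an enum value defined in this patch:

	#include <linux/qpnp/qpnp-adc.h>

	/* Consumer sketch only; EXAMPLE_CHANNEL stands in for whichever
	 * enum qpnp_vadc_channels value the board actually wires up. */
	static int example_read_channel(void)
	{
		struct qpnp_vadc_result result;
		int rc;

		rc = qpnp_vadc_read(EXAMPLE_CHANNEL, &result);
		if (rc) {
			pr_err("vadc read failed with %d\n", rc);
			return rc;
		}

		pr_info("adc code %d, physical %lld\n",
			result.adc_code, result.physical);
		return 0;
	}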
+
+static ssize_t qpnp_adc_show(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct qpnp_vadc_result result;
+	int rc = -1;
+
+	rc = qpnp_vadc_read(attr->index, &result);
+
+	if (rc)
+		return 0;
+
+	return snprintf(buf, QPNP_ADC_HWMON_NAME_LENGTH,
+		"Result:%lld Raw:%d\n", result.physical, result.adc_code);
+}
+
+static struct sensor_device_attribute qpnp_adc_attr =
+	SENSOR_ATTR(NULL, S_IRUGO, qpnp_adc_show, NULL, 0);
+
+static int32_t qpnp_vadc_init_hwmon(struct spmi_device *spmi)
+{
+	struct qpnp_vadc_drv *vadc = qpnp_vadc;
+	struct device_node *child;
+	struct device_node *node = spmi->dev.of_node;
+	int rc = 0, i = 0, channel;
+
+	for_each_child_of_node(node, child) {
+		channel = vadc->adc->adc_channels[i].channel_num;
+		qpnp_adc_attr.index = vadc->adc->adc_channels[i].channel_num;
+		qpnp_adc_attr.dev_attr.attr.name =
+						vadc->adc->adc_channels[i].name;
+		sysfs_attr_init(&vadc->sens_attr[i].dev_attr.attr);
+		memcpy(&vadc->sens_attr[i], &qpnp_adc_attr,
+						sizeof(qpnp_adc_attr));
+		rc = device_create_file(&spmi->dev,
+				&vadc->sens_attr[i].dev_attr);
+		if (rc) {
+			dev_err(&spmi->dev,
+				"device_create_file failed for dev %s\n",
+				vadc->adc->adc_channels[i].name);
+			goto hwmon_err_sens;
+		}
+		i++;
+	}
+
+	return 0;
+hwmon_err_sens:
+	pr_info("Init HWMON failed for qpnp_adc with %d\n", rc);
+	return rc;
+}
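
Each channel node becomes a sysfs attribute named after its label, printing the string built in qpnp_adc_show() above. A userspace sketch of reading one back; the path is an assumption, since the actual location depends on how the SPMI device is enumerated on a given board:

	#include <stdio.h>

	int main(void)
	{
		/* Placeholder path -- substitute the real sysfs location
		 * of the channel attribute on the target. */
		FILE *f = fopen("/sys/devices/.../<channel-label>", "r");
		char line[64];

		if (!f)
			return 1;
		if (fgets(line, sizeof(line), f))
			printf("%s", line);	/* "Result:<physical> Raw:<code>" */
		fclose(f);
		return 0;
	}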
+
+static int __devinit qpnp_vadc_probe(struct spmi_device *spmi)
+{
+	struct qpnp_vadc_drv *vadc;
+	struct qpnp_adc_drv *adc_qpnp;
+	struct device_node *node = spmi->dev.of_node;
+	struct device_node *child;
+	int rc, count_adc_channel_list = 0;
+
+	if (!node)
+		return -EINVAL;
+
+	if (qpnp_vadc) {
+		pr_err("VADC already in use\n");
+		return -EBUSY;
+	}
+
+	for_each_child_of_node(node, child)
+		count_adc_channel_list++;
+
+	if (!count_adc_channel_list) {
+		pr_err("No channel listing\n");
+		return -EINVAL;
+	}
+
+	vadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_vadc_drv) +
+		(sizeof(struct sensor_device_attribute) *
+				count_adc_channel_list), GFP_KERNEL);
+	if (!vadc) {
+		dev_err(&spmi->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	adc_qpnp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_drv),
+			GFP_KERNEL);
+	if (!adc_qpnp) {
+		dev_err(&spmi->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	vadc->adc = adc_qpnp;
+
+	rc = qpnp_adc_get_devicetree_data(spmi, vadc->adc);
+	if (rc) {
+		dev_err(&spmi->dev, "failed to read device tree\n");
+		return rc;
+	}
+
+	rc = devm_request_irq(&spmi->dev, vadc->adc->adc_irq,
+				qpnp_vadc_isr, IRQF_TRIGGER_RISING,
+				"qpnp_vadc_interrupt", vadc);
+	if (rc) {
+		dev_err(&spmi->dev,
+			"failed to request adc irq with error %d\n", rc);
+		return rc;
+	}
+
+	qpnp_vadc = vadc;
+	dev_set_drvdata(&spmi->dev, vadc);
+	rc = qpnp_vadc_init_hwmon(spmi);
+	if (rc) {
+		dev_err(&spmi->dev, "failed to initialize qpnp hwmon adc\n");
+		goto fail_free_irq;
+	}
+	vadc->vadc_hwmon = hwmon_device_register(&vadc->adc->spmi->dev);
+	vadc->vadc_init_calib = false;
+
+	rc = qpnp_vadc_configure_interrupt();
+	if (rc) {
+		dev_err(&spmi->dev, "failed to configure interrupt\n");
+		goto fail_free_irq;
+	}
+
+	return 0;
+
+fail_free_irq:
+	free_irq(vadc->adc->adc_irq, vadc);
+
+	return rc;
+}
+
+static int __devexit qpnp_vadc_remove(struct spmi_device *spmi)
+{
+	struct qpnp_vadc_drv *vadc = dev_get_drvdata(&spmi->dev);
+	struct device_node *node = spmi->dev.of_node;
+	struct device_node *child;
+	int i = 0;
+
+	for_each_child_of_node(node, child) {
+		device_remove_file(&spmi->dev,
+			&vadc->sens_attr[i].dev_attr);
+		i++;
+	}
+	free_irq(vadc->adc->adc_irq, vadc);
+	dev_set_drvdata(&spmi->dev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id qpnp_vadc_match_table[] = {
+	{	.compatible = "qcom,qpnp-vadc",
+	},
+	{}
+};
+
+static struct spmi_driver qpnp_vadc_driver = {
+	.driver		= {
+		.name	= "qcom,qpnp-vadc",
+		.of_match_table = qpnp_vadc_match_table,
+	},
+	.probe		= qpnp_vadc_probe,
+	.remove		= qpnp_vadc_remove,
+};
+
+static int __init qpnp_vadc_init(void)
+{
+	return spmi_driver_register(&qpnp_vadc_driver);
+}
+module_init(qpnp_vadc_init);
+
+static void __exit qpnp_vadc_exit(void)
+{
+	spmi_driver_unregister(&qpnp_vadc_driver);
+}
+module_exit(qpnp_vadc_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC Voltage ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 8fb19dc..1464dab 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -647,4 +647,11 @@
           If you say yes here you get support for Bosch Sensortec's
           acceleration sensors SMB380/BMA150.
 
+config STM_LIS3DH
+        tristate "STM LIS3DH acceleration sensor support"
+        depends on I2C=y
+        help
+          If you say yes here you get support for the STMicroelectronics
+          LIS3DH acceleration sensor.
+
 endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index eecee4e..96c9288 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -60,3 +60,4 @@
 obj-$(CONFIG_PMIC8058_OTHC)             += pmic8058-othc.o
 obj-$(CONFIG_INPUT_PMIC8058_VIBRA_MEMLESS) += pmic8058-vib-memless.o
 obj-$(CONFIG_BOSCH_BMA150)              += bma150.o
+obj-$(CONFIG_STM_LIS3DH)		+= lis3dh_acc.o
diff --git a/drivers/input/touchscreen/msm_ts.c b/drivers/input/touchscreen/msm_ts.c
index eb2e73b..e66120e 100644
--- a/drivers/input/touchscreen/msm_ts.c
+++ b/drivers/input/touchscreen/msm_ts.c
@@ -387,6 +387,7 @@
 
 	input_set_capability(ts->input_dev, EV_KEY, BTN_TOUCH);
 	set_bit(EV_ABS, ts->input_dev->evbit);
+	set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
 
 	input_set_abs_params(ts->input_dev, ABS_X, pdata->min_x, pdata->max_x,
 			     0, 0);
diff --git a/drivers/iommu/msm_iommu-v2.c b/drivers/iommu/msm_iommu-v2.c
index 26e967d..28ad0ff 100644
--- a/drivers/iommu/msm_iommu-v2.c
+++ b/drivers/iommu/msm_iommu-v2.c
@@ -51,10 +51,16 @@
 	if (ret)
 		goto fail;
 
-	if (drvdata->clk) {
-		ret = clk_prepare_enable(drvdata->clk);
-		if (ret)
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		clk_disable_unprepare(drvdata->pclk);
+
+	if (drvdata->aclk) {
+		ret = clk_prepare_enable(drvdata->aclk);
+		if (ret) {
+			clk_disable_unprepare(drvdata->clk);
 			clk_disable_unprepare(drvdata->pclk);
+		}
 	}
 fail:
 	return ret;
@@ -62,11 +68,23 @@
 
 static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
 {
-	if (drvdata->clk)
-		clk_disable_unprepare(drvdata->clk);
+	if (drvdata->aclk)
+		clk_disable_unprepare(drvdata->aclk);
+	clk_disable_unprepare(drvdata->clk);
 	clk_disable_unprepare(drvdata->pclk);
 }
 
+static void __sync_tlb(void __iomem *base, int ctx)
+{
+	SET_TLBSYNC(base, ctx, 0);
+
+	/* No barrier needed due to register proximity */
+	while (GET_CB_TLBSTATUS_SACTIVE(base, ctx))
+		cpu_relax();
+
+	/* No barrier needed due to read dependency */
+}
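
With __sync_tlb() in place, each invalidation below is followed by an explicit sync and a poll of the TLB status register, so the flush only returns once the hardware has drained the operation. Condensed view (illustrative; the real paths also manage clocks and iterate the attached context banks):

	/*
	 *	SET_TLBIVA(base, ctx, asid | (va & CB_TLBIVA_VA));
	 *	mb();                  -- order the invalidate write
	 *	__sync_tlb(base, ctx); -- TLBSYNC, then poll SACTIVE until clear
	 */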
+
 static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
 {
 	struct msm_priv *priv = domain->priv;
@@ -92,6 +110,7 @@
 		SET_TLBIVA(iommu_drvdata->base, ctx_drvdata->num,
 			   asid | (va & CB_TLBIVA_VA));
 		mb();
+		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
 		__disable_clocks(iommu_drvdata);
 	}
 fail:
@@ -121,6 +140,7 @@
 
 		SET_TLBIASID(iommu_drvdata->base, ctx_drvdata->num, asid);
 		mb();
+		__sync_tlb(iommu_drvdata->base, ctx_drvdata->num);
 		__disable_clocks(iommu_drvdata);
 	}
 
diff --git a/drivers/iommu/msm_iommu_dev-v2.c b/drivers/iommu/msm_iommu_dev-v2.c
index d3a088a..8c26f95 100644
--- a/drivers/iommu/msm_iommu_dev-v2.c
+++ b/drivers/iommu/msm_iommu_dev-v2.c
@@ -69,7 +69,7 @@
 {
 	struct msm_iommu_drvdata *drvdata;
 	struct resource *r;
-	int ret;
+	int ret, needs_alt_core_clk;
 
 	if (msm_iommu_root_dev == pdev)
 		return 0;
@@ -93,55 +93,42 @@
 	if (IS_ERR(drvdata->gdsc))
 		return -EINVAL;
 
-	drvdata->pclk = clk_get(&pdev->dev, "iface_clk");
+	drvdata->pclk = devm_clk_get(&pdev->dev, "iface_clk");
 	if (IS_ERR(drvdata->pclk))
 		return PTR_ERR(drvdata->pclk);
 
-	ret = clk_prepare_enable(drvdata->pclk);
-	if (ret)
-		goto fail_enable;
+	drvdata->clk = devm_clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(drvdata->clk))
+		return PTR_ERR(drvdata->clk);
 
-	drvdata->clk = clk_get(&pdev->dev, "core_clk");
-	if (!IS_ERR(drvdata->clk)) {
-		if (clk_get_rate(drvdata->clk) == 0) {
-			ret = clk_round_rate(drvdata->clk, 1);
-			clk_set_rate(drvdata->clk, ret);
-		}
+	needs_alt_core_clk = of_property_read_bool(pdev->dev.of_node,
+						   "qcom,needs-alt-core-clk");
+	if (needs_alt_core_clk) {
+		drvdata->aclk = devm_clk_get(&pdev->dev, "alt_core_clk");
+		if (IS_ERR(drvdata->aclk))
+			return PTR_ERR(drvdata->aclk);
+	}
 
-		ret = clk_prepare_enable(drvdata->clk);
-		if (ret) {
-			clk_put(drvdata->clk);
-			goto fail_pclk;
-		}
-	} else
-		drvdata->clk = NULL;
+	if (clk_get_rate(drvdata->clk) == 0) {
+		ret = clk_round_rate(drvdata->clk, 1);
+		clk_set_rate(drvdata->clk, ret);
+	}
+
+	if (drvdata->aclk && clk_get_rate(drvdata->aclk) == 0) {
+		ret = clk_round_rate(drvdata->aclk, 1);
+		clk_set_rate(drvdata->aclk, ret);
+	}
 
 	ret = msm_iommu_parse_dt(pdev, drvdata);
 	if (ret)
-		goto fail_clk;
+		return ret;
 
 	pr_info("device %s mapped at %p, with %d ctx banks\n",
 		drvdata->name, drvdata->base, drvdata->ncb);
 
 	platform_set_drvdata(pdev, drvdata);
 
-	if (drvdata->clk)
-		clk_disable_unprepare(drvdata->clk);
-
-	clk_disable_unprepare(drvdata->pclk);
-
 	return 0;
-
-fail_clk:
-	if (drvdata->clk) {
-		clk_disable_unprepare(drvdata->clk);
-		clk_put(drvdata->clk);
-	}
-fail_pclk:
-	clk_disable_unprepare(drvdata->pclk);
-fail_enable:
-	clk_put(drvdata->pclk);
-	return ret;
 }
 
 static int __devexit msm_iommu_remove(struct platform_device *pdev)
@@ -192,7 +179,7 @@
 	 */
 	ctx_drvdata->num = ((r->start - rp.start) >> CTX_SHIFT) - 8;
 
-	if (of_property_read_string(pdev->dev.of_node, "qcom,iommu-ctx-name",
+	if (of_property_read_string(pdev->dev.of_node, "label",
 					&ctx_drvdata->name))
 		ctx_drvdata->name = dev_name(&pdev->dev);
 
@@ -232,7 +219,7 @@
 	ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
 	if (!ret)
 		dev_info(&pdev->dev, "context %s using bank %d\n",
-				dev_name(&pdev->dev), ctx_drvdata->num);
+			 ctx_drvdata->name, ctx_drvdata->num);
 
 	return ret;
 }
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index 0bdf6cb..790cc10 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -137,7 +137,12 @@
 
 	/*
 	cc = buf[3] & 0x0f;
-	ccok = ((feed->cc + 1) & 0x0f) == cc;
+	if (feed->first_cc)
+		ccok = 1;
+	else
+		ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+	feed->first_cc = 0;
 	feed->cc = cc;
 	if (!ccok)
 		printk("missed packet!\n");
@@ -351,7 +356,12 @@
 	p = 188 - count;	/* payload start */
 
 	cc = buf[3] & 0x0f;
-	ccok = ((feed->cc + 1) & 0x0f) == cc;
+	if (feed->first_cc)
+		ccok = 1;
+	else
+		ccok = ((feed->cc + 1) & 0x0f) == cc;
+
+	feed->first_cc = 0;
 	feed->cc = cc;
 
 	if (buf[3] & 0x20) {
@@ -996,6 +1006,8 @@
 		return -ENODEV;
 	}
 
+	feed->first_cc = 1;
+
 	if ((ret = demux->start_feed(feed)) < 0) {
 		mutex_unlock(&demux->mutex);
 		return ret;
@@ -1299,6 +1311,7 @@
 	dvbdmxfeed->feed.sec.secbuf = dvbdmxfeed->feed.sec.secbuf_base;
 	dvbdmxfeed->feed.sec.secbufp = 0;
 	dvbdmxfeed->feed.sec.seclen = 0;
+	dvbdmxfeed->first_cc = 1;
 
 	if (!dvbdmx->start_feed) {
 		mutex_unlock(&dvbdmx->mutex);
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
index 3970a6c..a663191 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.h
+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
@@ -98,6 +98,7 @@
 	enum dmx_ts_pes pes_type;
 
 	int cc;
+	int first_cc;
 	int pusi_seen;		/* prevents feeding of garbage from previous section */
 
 	u32 peslen;
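
The first_cc flag added above makes the very first packet of a freshly (re)started feed always pass the continuity check, since its 4-bit counter can legitimately start anywhere; from then on the expected value is (previous + 1) mod 16. A standalone sketch of the same check:

	/* Standalone sketch of the continuity check introduced above: cc is
	 * the 4-bit counter from TS byte 3, *prev/*first are per-feed state. */
	static int example_cc_ok(unsigned int cc, unsigned int *prev, int *first)
	{
		int ok = *first ? 1 : (((*prev + 1) & 0x0f) == cc);

		*first = 0;
		*prev = cc;
		return ok;
	}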
diff --git a/drivers/media/video/msm/Kconfig b/drivers/media/video/msm/Kconfig
index fb5aea0..c5abd76 100644
--- a/drivers/media/video/msm/Kconfig
+++ b/drivers/media/video/msm/Kconfig
@@ -280,6 +280,16 @@
           and cropping image. The driver support V4L2 subdev
           APIs.
 
+config MSM_CCI
+        bool "Qualcomm MSM Camera Control Interface support"
+        depends on MSM_CAMERA
+        ---help---
+          Enable support for the Camera Control Interface driver, only
+          for those platforms that have hardware support. This driver
+          handles I2C reads and writes on the I2C bus and is also
+          responsible for synchronizing GPIO operations with data
+          frames.
+
 config QUP_EXCLUSIVE_TO_CAMERA
 	bool "QUP exclusive to camera"
 	depends on MSM_CAMERA
@@ -339,6 +349,15 @@
           of CSIPHY, CSID and ISPIF subdevices to receive data
           from sensor.
 
+config MSM_ISPIF
+        bool "Qualcomm MSM Image Signal Processing interface support"
+        depends on MSM_CAMERA
+        ---help---
+          Enable support for the Image Signal Processing interface
+          module. This module acts as a crossbar between CSID and VFE.
+          The output of any CSID CID can be routed to any of the pixel
+          or raw data interfaces in the VFE.
+
 config S5K3L1YX
 	bool "Sensor S5K3L1YX (BAYER 12M)"
 	depends on MSM_CAMERA
diff --git a/drivers/media/video/msm/Makefile b/drivers/media/video/msm/Makefile
index eb45271..3ca4198 100644
--- a/drivers/media/video/msm/Makefile
+++ b/drivers/media/video/msm/Makefile
@@ -1,42 +1,35 @@
 GCC_VERSION      := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
-ifeq ($(GCC_VERSION),0404)
-CFLAGS_REMOVE_msm_vfe8x.o = -Wframe-larger-than=1024
-endif
 
-EXTRA_CFLAGS += -Idrivers/media/video/msm/io
+ccflags-y += -Idrivers/media/video/msm/io
+ccflags-y += -Idrivers/media/video/msm/vfe
 obj-$(CONFIG_MSM_CAMERA) += io/
 ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
+  EXTRA_CFLAGS += -Idrivers/media/video/msm/cci
   EXTRA_CFLAGS += -Idrivers/media/video/msm/csi
   EXTRA_CFLAGS += -Idrivers/media/video/msm/eeprom
   EXTRA_CFLAGS += -Idrivers/media/video/msm/sensors
   EXTRA_CFLAGS += -Idrivers/media/video/msm/actuators
   EXTRA_CFLAGS += -Idrivers/media/video/msm/server
-  obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o msm_vfe_stats_buf.o
+  obj-$(CONFIG_MSM_CAMERA) += msm_isp.o msm.o msm_mem.o msm_mctl.o msm_mctl_buf.o msm_mctl_pp.o
   obj-$(CONFIG_MSM_CAMERA) += server/
   obj-$(CONFIG_MSM_CAM_IRQ_ROUTER) += msm_camirq_router.o
-  obj-$(CONFIG_MSM_CAMERA) += eeprom/ sensors/ actuators/ csi/
+  obj-$(CONFIG_MSM_CAMERA) += cci/ eeprom/ sensors/ actuators/ csi/
   obj-$(CONFIG_MSM_CPP) += cpp/
   obj-$(CONFIG_MSM_CAMERA) += msm_gesture.o
 else
   obj-$(CONFIG_MSM_CAMERA) += msm_camera.o
 endif
+obj-$(CONFIG_MSM_CAMERA) += vfe/
 obj-$(CONFIG_MSM_CAMERA) += msm_axi_qos.o gemini/ mercury/
 obj-$(CONFIG_MSM_CAMERA_FLASH) += flash.o
-obj-$(CONFIG_ARCH_MSM_ARM11) += msm_vfe7x.o
 ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
-  obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a_v4l2.o
+  obj-$(CONFIG_ARCH_MSM8X60) += msm_vpe.o
+  obj-$(CONFIG_ARCH_MSM7X30) += msm_vpe.o msm_axi_qos.o
 else
-  obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a.o
+  obj-$(CONFIG_ARCH_MSM8X60) += msm_vpe1.o
+  obj-$(CONFIG_ARCH_MSM7X30) += msm_vpe1.o
 endif
-obj-$(CONFIG_ARCH_QSD8X50) += msm_vfe8x.o msm_vfe8x_proc.o
-ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
-  obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31_v4l2.o msm_vpe.o
-  obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31_v4l2.o msm_vpe.o msm_axi_qos.o
-else
-  obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31.o msm_vpe1.o
-  obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31.o msm_vpe1.o
-endif
-obj-$(CONFIG_ARCH_MSM8960) += msm_vfe32.o msm_vpe.o
+obj-$(CONFIG_ARCH_MSM8960) += msm_vpe.o
 obj-$(CONFIG_MT9T013) += mt9t013.o mt9t013_reg.o
 obj-$(CONFIG_SN12M0PZ) += sn12m0pz.o sn12m0pz_reg.o
 obj-$(CONFIG_MT9P012) += mt9p012_reg.o
diff --git a/drivers/media/video/msm/cci/Makefile b/drivers/media/video/msm/cci/Makefile
new file mode 100644
index 0000000..195a1b2f
--- /dev/null
+++ b/drivers/media/video/msm/cci/Makefile
@@ -0,0 +1,3 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm -Idrivers/media/video/msm/server
+obj-$(CONFIG_MSM_CCI) += msm_cci.o
diff --git a/drivers/media/video/msm/cci/msm_cam_cci_hwreg.h b/drivers/media/video/msm/cci/msm_cam_cci_hwreg.h
new file mode 100644
index 0000000..68c78d5
--- /dev/null
+++ b/drivers/media/video/msm/cci/msm_cam_cci_hwreg.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CAM_CCI_HWREG__
+#define __MSM_CAM_CCI_HWREG__
+
+#define CCI_HW_VERSION_ADDR                                         0x00000000
+#define CCI_RESET_CMD_ADDR                                          0x00000004
+#define CCI_RESET_CMD_RMSK                                          0xcf73f3f7
+#define CCI_M0_RESET_RMSK                                                0x3F1
+#define CCI_M1_RESET_RMSK                                              0x3F001
+#define CCI_QUEUE_START_ADDR                                        0x00000008
+#define CCI_SET_CID_SYNC_TIMER_0_ADDR                               0x00000010
+#define CCI_I2C_M0_SCL_CTL_ADDR                                     0x00000100
+#define CCI_I2C_M0_SDA_CTL_0_ADDR                                   0x00000104
+#define CCI_I2C_M0_SDA_CTL_1_ADDR                                   0x00000108
+#define CCI_I2C_M0_SDA_CTL_2_ADDR                                   0x0000010c
+#define CCI_I2C_M0_READ_DATA_ADDR                                   0x00000118
+#define CCI_I2C_M0_MISC_CTL_ADDR                                    0x00000110
+#define CCI_HALT_REQ_ADDR                                           0x00000034
+#define CCI_M0_HALT_REQ_RMSK                                               0x1
+#define CCI_M1_HALT_REQ_RMSK                                              0x01
+#define CCI_I2C_M1_SCL_CTL_ADDR                                     0x00000200
+#define CCI_I2C_M1_SDA_CTL_0_ADDR                                   0x00000204
+#define CCI_I2C_M1_SDA_CTL_1_ADDR                                   0x00000208
+#define CCI_I2C_M1_SDA_CTL_2_ADDR                                   0x0000020c
+#define CCI_I2C_M1_MISC_CTL_ADDR                                    0x00000210
+#define CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR                             0x00000304
+#define CCI_I2C_M0_Q0_CUR_CMD_ADDR                                  0x00000308
+#define CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR                            0x00000300
+#define CCI_I2C_M0_Q0_LOAD_DATA_ADDR                                0x00000310
+#define CCI_IRQ_MASK_0_ADDR                                         0x00000c04
+#define CCI_IRQ_CLEAR_0_ADDR                                        0x00000c08
+#define CCI_IRQ_STATUS_0_ADDR                                       0x00000c0c
+#define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR_BMSK                    0x40000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR_BMSK                    0x20000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR_BMSK                    0x10000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR_BMSK                     0x8000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK                   0x4000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK                   0x2000000
+#define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK                           0x1000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK                        0x100000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK                         0x10000
+#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK                            0x1000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK                           0x100
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK                            0x10
+#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK                               0x1
+#define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR                               0x00000c00
+#endif /* __MSM_CAM_CCI_HWREG__ */
diff --git a/drivers/media/video/msm/cci/msm_cci.c b/drivers/media/video/msm/cci/msm_cci.c
new file mode 100644
index 0000000..ad3cc6a
--- /dev/null
+++ b/drivers/media/video/msm/cci/msm_cci.c
@@ -0,0 +1,748 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <mach/board.h>
+#include <mach/camera.h>
+#include <media/msm_isp.h>
+#include "msm_cci.h"
+#include "msm.h"
+#include "msm_cam_server.h"
+#include "msm_cam_cci_hwreg.h"
+
+#define V4L2_IDENT_CCI 50005
+#define CCI_I2C_QUEUE_0_SIZE 2
+#define CCI_I2C_QUEUE_1_SIZE 16
+
+#undef CDBG
+#define CDBG pr_debug
+
+static void msm_cci_set_clk_param(struct cci_device *cci_dev)
+{
+	uint16_t THIGH = 78;
+	uint16_t TLOW = 114;
+	uint16_t TSUSTO = 28;
+	uint16_t TSUSTA = 28;
+	uint16_t THDDAT = 10;
+	uint16_t THDSTA = 77;
+	uint16_t TBUF = 118;
+	uint8_t HW_SCL_STRETCH_EN = 0; /*enable or disable SCL clock
+					* stretching */
+	uint8_t HW_RDHLD = 6; /* internal hold time 1-6 cycles of SDA to bridge
+			       * undefined falling SCL region */
+	uint8_t HW_TSP = 1; /* glitch filter 1-3 cycles */
+
+	msm_camera_io_w(THIGH << 16 | TLOW, cci_dev->base +
+		CCI_I2C_M0_SCL_CTL_ADDR);
+	msm_camera_io_w(TSUSTO << 16 | TSUSTA, cci_dev->base +
+		CCI_I2C_M0_SDA_CTL_0_ADDR);
+	msm_camera_io_w(THDDAT << 16 | THDSTA, cci_dev->base +
+		CCI_I2C_M0_SDA_CTL_1_ADDR);
+	msm_camera_io_w(TBUF, cci_dev->base +
+		CCI_I2C_M0_SDA_CTL_2_ADDR);
+	msm_camera_io_w(HW_SCL_STRETCH_EN << 8 | HW_RDHLD << 4 | HW_TSP,
+		cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
+	msm_camera_io_w(THIGH << 16 | TLOW, cci_dev->base +
+		CCI_I2C_M1_SCL_CTL_ADDR);
+	msm_camera_io_w(TSUSTO << 16 | TSUSTA, cci_dev->base +
+		CCI_I2C_M1_SDA_CTL_0_ADDR);
+	msm_camera_io_w(THDDAT << 16 | THDSTA, cci_dev->base +
+		CCI_I2C_M1_SDA_CTL_1_ADDR);
+	msm_camera_io_w(TBUF, cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
+	msm_camera_io_w(HW_SCL_STRETCH_EN << 8 | HW_RDHLD << 4 | HW_TSP,
+		cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
+}
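
The timing constants above program the SCL/SDA waveform for both masters. Assuming they are counts of cci_clk_src cycles (19.2 MHz per cci_clk_info below) — an assumption, since the hardware spec is not part of this patch — THIGH = 78 and TLOW = 114 give an SCL period of 192 cycles, i.e. roughly 100 kHz standard-mode I2C:

	/* Back-of-the-envelope only, under the cycle-count assumption above. */
	static unsigned long example_scl_hz(unsigned long src_hz,
					    unsigned int thigh, unsigned int tlow)
	{
		return src_hz / (thigh + tlow);	/* 19200000 / (78 + 114) = 100000 */
	}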
+
+static int32_t msm_cci_i2c_config_sync_timer(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	struct cci_device *cci_dev;
+	cci_dev = v4l2_get_subdevdata(sd);
+	msm_camera_io_w(c_ctrl->cci_info->cid, cci_dev->base +
+		CCI_SET_CID_SYNC_TIMER_0_ADDR + (c_ctrl->cci_info->cid * 0x4));
+	return 0;
+}
+
+static int32_t msm_cci_i2c_set_freq(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	struct cci_device *cci_dev;
+	uint32_t val;
+	cci_dev = v4l2_get_subdevdata(sd);
+	val = c_ctrl->cci_info->freq;
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR +
+		c_ctrl->cci_info->cci_i2c_master*0x100);
+	return 0;
+}
+
+static int32_t msm_cci_validate_queue(struct cci_device *cci_dev,
+	uint32_t len,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	uint32_t read_val = 0;
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	read_val = msm_camera_io_r(cci_dev->base +
+		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+	CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
+		__func__, __LINE__, read_val, len,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+	if ((read_val + len + 1) > cci_dev->
+		cci_i2c_queue_info[master][queue].max_queue_size) {
+		uint32_t reg_val = 0;
+		uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+		CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+		msm_camera_io_w(report_val,
+			cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+			reg_offset);
+		read_val++;
+		CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
+			__func__, __LINE__, read_val);
+		msm_camera_io_w(read_val, cci_dev->base +
+			CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		reg_val = 1 << ((master * 2) + queue);
+		CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+		msm_camera_io_w(reg_val, cci_dev->base + CCI_QUEUE_START_ADDR);
+		CDBG("%s line %d wait_for_completion_interruptible\n",
+			__func__, __LINE__);
+		wait_for_completion_interruptible(&cci_dev->
+			cci_master_info[master].reset_complete);
+		rc = cci_dev->cci_master_info[master].status;
+		if (rc < 0)
+			pr_err("%s failed rc %d\n", __func__, rc);
+	}
+	return rc;
+}
+
+static int32_t msm_cci_data_queue(struct cci_device *cci_dev,
+	struct msm_camera_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue)
+{
+	uint16_t i = 0, j = 0, k = 0, h = 0, len = 0;
+	uint32_t cmd = 0;
+	uint8_t data[10];
+	uint16_t reg_addr = 0;
+	struct msm_camera_cci_i2c_write_cfg *i2c_msg =
+		&c_ctrl->cfg.cci_i2c_write_cfg;
+	uint16_t cmd_size = i2c_msg->size;
+	struct msm_camera_i2c_reg_conf *i2c_cmd = i2c_msg->reg_conf_tbl;
+	enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+	CDBG("%s addr type %d data type %d\n", __func__,
+		i2c_msg->addr_type, i2c_msg->data_type);
+	/* assume total size within the max queue */
+	while (cmd_size) {
+		CDBG("%s cmd_size %d addr 0x%x data 0x%x", __func__,
+			cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
+		data[i++] = CCI_I2C_WRITE_CMD;
+		if (i2c_cmd->reg_addr)
+			reg_addr = i2c_cmd->reg_addr;
+		/* either byte or word addr */
+		if (i2c_msg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
+			data[i++] = reg_addr;
+		else {
+			data[i++] = (reg_addr & 0xFF00) >> 8;
+			data[i++] = reg_addr & 0x00FF;
+		}
+		/* max of 10 data bytes */
+		do {
+			if (i2c_msg->data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+				data[i++] = i2c_cmd->reg_data;
+				reg_addr++;
+			} else {
+				if ((i + 1) < 10) {
+					data[i++] = (i2c_cmd->reg_data &
+						0xFF00) >> 8; /* MSB */
+					data[i++] = i2c_cmd->reg_data &
+						0x00FF; /* LSB */
+					reg_addr += 2;
+				} else
+					break;
+			}
+			i2c_cmd++;
+		} while (--cmd_size && !i2c_cmd->reg_addr && (i < 10));
+		data[0] |= ((i-1) << 4);
+		len = ((i-1)/4) + 1;
+		msm_cci_validate_queue(cci_dev, len, master, queue);
+		for (h = 0, k = 0; h < len; h++) {
+			cmd = 0;
+			for (j = 0; (j < 4 && k < i); j++)
+				cmd |= (data[k++] << (j * 8));
+			CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
+				__func__, cmd);
+			msm_camera_io_w(cmd, cci_dev->base +
+				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+				master * 0x200 + queue * 0x100);
+		}
+		i = 0;
+	}
+	return 0;
+}
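
A worked example of the packing loop above, using a hypothetical transfer (word address, byte data), writing 0x42 to register 0x3008:

	/*
	 * Hypothetical transfer, for illustration: one write of 0x42 to
	 * register 0x3008 (word address, byte data) packs as
	 *
	 *   data[] = { CCI_I2C_WRITE_CMD | (3 << 4), 0x30, 0x08, 0x42 }
	 *
	 * which, with CCI_I2C_WRITE_CMD == 9 as enumerated in msm_cci.h,
	 * is loaded as the single 32-bit word 0x42083039 (bytes packed
	 * LSB first) into CCI_I2C_M0_Q0_LOAD_DATA_ADDR for the chosen
	 * master/queue.
	 */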
+
+static int32_t msm_cci_write_i2c_queue(struct cci_device *cci_dev,
+	uint32_t val,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	CDBG("%s:%d called\n", __func__, __LINE__);
+	msm_cci_validate_queue(cci_dev, 1, master, queue);
+	CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val %x:%x\n",
+		__func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset, val);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset);
+	return rc;
+}
+
+static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+	int32_t read_bytes = 0;
+	int32_t index = 0, first_byte = 0;
+	uint32_t i = 0;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_1;
+	struct cci_device *cci_dev = NULL;
+	struct msm_camera_cci_i2c_read_cfg *read_cfg = NULL;
+	CDBG("%s line %d\n", __func__, __LINE__);
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+	mutex_lock(&cci_dev->cci_master_info[master].mutex);
+	CDBG("%s master %d, queue %d\n", __func__, master, queue);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	if (read_cfg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
+		val = CCI_I2C_WRITE_CMD | (read_cfg->addr_type << 4) |
+			((read_cfg->addr & 0xFF) << 8);
+	if (read_cfg->addr_type == MSM_CAMERA_I2C_WORD_ADDR)
+		val = CCI_I2C_WRITE_CMD | (read_cfg->addr_type << 4) |
+			(((read_cfg->addr & 0xFF00) >> 8) << 8) |
+			((read_cfg->addr & 0xFF) << 16);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_UNLOCK_CMD;
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = msm_camera_io_r(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+	CDBG("%s cur word cnt %x\n", __func__, val);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	msm_camera_io_w(val, cci_dev->base + CCI_QUEUE_START_ADDR);
+
+	wait_for_completion_interruptible(&cci_dev->
+		cci_master_info[master].reset_complete);
+
+	read_bytes = (read_cfg->num_byte / 4) + 1;
+	index = 0;
+	CDBG("%s index %d num_type %d\n", __func__, index,
+		read_cfg->num_byte);
+	first_byte = 0;
+	do {
+		val = msm_camera_io_r(cci_dev->base +
+			CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+		CDBG("%s read val %x\n", __func__, val);
+		for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
+			CDBG("%s i %d index %d\n", __func__, i, index);
+			if (!first_byte) {
+				CDBG("%s sid %x\n", __func__, val & 0xFF);
+				first_byte++;
+			} else {
+				read_cfg->data[index] =
+					(val  >> (i * 8)) & 0xFF;
+				CDBG("%s data[%d] %x\n", __func__, index,
+					read_cfg->data[index]);
+				index++;
+			}
+		}
+	} while (--read_bytes > 0);
+ERROR:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex);
+	return rc;
+}
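
For completeness, a hedged sketch of how a sensor driver might drive this read path through the subdev ioctl. The field names follow their use in msm_cci_i2c_read() above and msm_cci_config() below, but struct msm_camera_cci_ctrl and struct msm_camera_cci_client are defined outside this patch, so treat the layout as an assumption:

	#include <linux/string.h>
	#include <media/v4l2-subdev.h>
	#include "msm_cci.h"

	/* Consumer sketch only; structure layouts are assumed from their
	 * use in msm_cci.c, not taken from a header in this patch. */
	static int example_cci_read_byte(struct v4l2_subdev *cci_sd,
					 struct msm_camera_cci_client *client,
					 uint16_t addr, uint8_t *out)
	{
		struct msm_camera_cci_ctrl ctrl;
		int rc;

		memset(&ctrl, 0, sizeof(ctrl));
		ctrl.cmd = MSM_CCI_I2C_READ;
		ctrl.cci_info = client;		/* sid, cci_i2c_master, retries... */
		ctrl.cfg.cci_i2c_read_cfg.addr = addr;
		ctrl.cfg.cci_i2c_read_cfg.addr_type = MSM_CAMERA_I2C_WORD_ADDR;
		ctrl.cfg.cci_i2c_read_cfg.data = out;
		ctrl.cfg.cci_i2c_read_cfg.num_byte = 1;

		rc = v4l2_subdev_call(cci_sd, core, ioctl,
				      VIDIOC_MSM_CCI_CFG, &ctrl);
		return rc < 0 ? rc : ctrl.status;
	}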
+
+static int32_t msm_cci_i2c_write(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+	uint32_t val;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_0;
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	CDBG("%s master %d, queue %d\n", __func__, master, queue);
+	mutex_lock(&cci_dev->cci_master_info[master].mutex);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	CDBG("%s:%d CCI_I2C_SET_PARAM_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	CDBG("%s:%d CCI_I2C_LOCK_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	msm_cci_data_queue(cci_dev, c_ctrl, queue);
+	val = CCI_I2C_UNLOCK_CMD;
+	CDBG("%s:%d CCI_I2C_UNLOCK_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = CCI_I2C_REPORT_CMD | (1 << 8);
+	CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+	rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CDBG("%s failed line %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+
+	val = msm_camera_io_r(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+	CDBG("%s line %d size of queue %d\n", __func__, __LINE__, val);
+	CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR\n", __func__, __LINE__);
+	msm_camera_io_w(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR +
+		master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+	msm_camera_io_w(val, cci_dev->base + CCI_QUEUE_START_ADDR +
+		master*0x200 + queue * 0x100);
+
+	CDBG("%s line %d wait_for_completion_interruptible\n",
+		__func__, __LINE__);
+	wait_for_completion_interruptible(&cci_dev->
+		cci_master_info[master].reset_complete);
+ERROR:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex);
+	return rc;
+}
+
+static int msm_cci_subdev_g_chip_ident(struct v4l2_subdev *sd,
+			struct v4l2_dbg_chip_ident *chip)
+{
+	BUG_ON(!chip);
+	chip->ident = V4L2_IDENT_CCI;
+	chip->revision = 0;
+	return 0;
+}
+
+static struct msm_cam_clk_info cci_clk_info[] = {
+	{"cci_clk_src", 19200000},
+};
+
+static int32_t msm_cci_init(struct v4l2_subdev *sd)
+{
+	int rc = 0;
+	struct cci_device *cci_dev;
+	cci_dev = v4l2_get_subdevdata(sd);
+	CDBG("%s line %d\n", __func__, __LINE__);
+	if (cci_dev == NULL) {
+		rc = -ENOMEM;
+		return rc;
+	}
+	rc = msm_cam_clk_enable(&cci_dev->pdev->dev, cci_clk_info,
+		cci_dev->cci_clk, ARRAY_SIZE(cci_clk_info), 1);
+	if (rc < 0) {
+		CDBG("%s: regulator enable failed\n", __func__);
+		goto clk_enable_failed;
+	}
+
+	enable_irq(cci_dev->irq->start);
+	cci_dev->hw_version = msm_camera_io_r(cci_dev->base +
+		CCI_HW_VERSION_ADDR);
+	cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+	msm_camera_io_w(0xFFFFFFFF, cci_dev->base + CCI_RESET_CMD_ADDR);
+	msm_camera_io_w(0x1, cci_dev->base + CCI_RESET_CMD_ADDR);
+	wait_for_completion_interruptible(&cci_dev->cci_master_info[MASTER_0].
+		reset_complete);
+	msm_cci_set_clk_param(cci_dev);
+	msm_camera_io_w(0xFFFFFFFF, cci_dev->base + CCI_IRQ_MASK_0_ADDR);
+	msm_camera_io_w(0xFFFFFFFF, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+	msm_camera_io_w(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	msm_camera_io_w(0x0, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	msm_camera_io_w(0x0, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+	return 0;
+
+clk_enable_failed:
+	return rc;
+}
+
+static int32_t msm_cci_release(struct v4l2_subdev *sd)
+{
+	struct cci_device *cci_dev;
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	disable_irq(cci_dev->irq->start);
+
+	msm_cam_clk_enable(&cci_dev->pdev->dev, cci_clk_info,
+		cci_dev->cci_clk, ARRAY_SIZE(cci_clk_info), 0);
+
+	return 0;
+}
+
+static int32_t msm_cci_config(struct v4l2_subdev *sd,
+	struct msm_camera_cci_ctrl *cci_ctrl)
+{
+	int32_t rc = 0;
+	CDBG("%s line %d cmd %d\n", __func__, __LINE__,
+		cci_ctrl->cmd);
+	switch (cci_ctrl->cmd) {
+	case MSM_CCI_INIT:
+		rc = msm_cci_init(sd);
+		break;
+	case MSM_CCI_RELEASE:
+		rc = msm_cci_release(sd);
+		break;
+	case MSM_CCI_SET_SID:
+		break;
+	case MSM_CCI_SET_FREQ:
+		rc = msm_cci_i2c_set_freq(sd, cci_ctrl);
+		break;
+	case MSM_CCI_SET_SYNC_CID:
+		rc = msm_cci_i2c_config_sync_timer(sd, cci_ctrl);
+		break;
+	case MSM_CCI_I2C_READ:
+		rc = msm_cci_i2c_read(sd, cci_ctrl);
+		break;
+	case MSM_CCI_I2C_WRITE:
+		rc = msm_cci_i2c_write(sd, cci_ctrl);
+		break;
+	case MSM_CCI_GPIO_WRITE:
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+	CDBG("%s line %d rc %d\n", __func__, __LINE__, rc);
+	cci_ctrl->status = rc;
+	return rc;
+}
+
+static irqreturn_t msm_cci_irq(int irq_num, void *data)
+{
+	uint32_t irq;
+	struct cci_device *cci_dev = data;
+	irq = msm_camera_io_r(cci_dev->base + CCI_IRQ_STATUS_0_ADDR);
+	msm_camera_io_w(irq, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+	msm_camera_io_w(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	msm_camera_io_w(0x0, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	CDBG("%s CCI_I2C_M0_STATUS_ADDR = 0x%x\n", __func__, irq);
+	if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
+		if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_0].reset_pending =
+				FALSE;
+			complete(&cci_dev->cci_master_info[MASTER_0].
+				reset_complete);
+		}
+		if (cci_dev->cci_master_info[MASTER_1].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_1].reset_pending =
+				FALSE;
+			complete(&cci_dev->cci_master_info[MASTER_1].
+				reset_complete);
+		}
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR_BMSK)) {
+		cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
+		msm_camera_io_w(CCI_M0_HALT_REQ_RMSK,
+			cci_dev->base + CCI_HALT_REQ_ADDR);
+	} else if ((irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR_BMSK) ||
+		(irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR_BMSK)) {
+		cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
+		msm_camera_io_w(CCI_M1_HALT_REQ_RMSK,
+			cci_dev->base + CCI_HALT_REQ_ADDR);
+	} else if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+		msm_camera_io_w(CCI_M0_RESET_RMSK,
+			cci_dev->base + CCI_RESET_CMD_ADDR);
+	} else if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
+		msm_camera_io_w(CCI_M1_RESET_RMSK,
+			cci_dev->base + CCI_RESET_CMD_ADDR);
+	}
+	return IRQ_HANDLED;
+}
+
+int msm_cci_irq_routine(struct v4l2_subdev *sd, u32 status, bool *handled)
+{
+	struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
+	irqreturn_t ret;
+	CDBG("%s line %d\n", __func__, __LINE__);
+	ret = msm_cci_irq(cci_dev->irq->start, cci_dev);
+	*handled = TRUE;
+	return 0;
+}
+
+static long msm_cci_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int32_t rc = 0;
+	CDBG("%s line %d\n", __func__, __LINE__);
+	switch (cmd) {
+	case VIDIOC_MSM_CCI_CFG:
+		rc = msm_cci_config(sd, arg);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+	CDBG("%s line %d rc %d\n", __func__, __LINE__, rc);
+	return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_cci_subdev_core_ops = {
+	.g_chip_ident = &msm_cci_subdev_g_chip_ident,
+	.ioctl = &msm_cci_subdev_ioctl,
+	.interrupt_service_routine = msm_cci_irq_routine,
+};
+
+static const struct v4l2_subdev_ops msm_cci_subdev_ops = {
+	.core = &msm_cci_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_cci_internal_ops;
+
+static void msm_cci_initialize_cci_params(struct cci_device *new_cci_dev)
+{
+	uint8_t i = 0, j = 0;
+	for (i = 0; i < NUM_MASTERS; i++) {
+		new_cci_dev->cci_master_info[i].status = 0;
+		mutex_init(&new_cci_dev->cci_master_info[i].mutex);
+		init_completion(&new_cci_dev->
+			cci_master_info[i].reset_complete);
+		for (j = 0; j < NUM_QUEUES; j++) {
+			if (j == QUEUE_0)
+				new_cci_dev->cci_i2c_queue_info[i][j].
+					max_queue_size = CCI_I2C_QUEUE_0_SIZE;
+			else
+				new_cci_dev->cci_i2c_queue_info[i][j].
+					max_queue_size = CCI_I2C_QUEUE_1_SIZE;
+		}
+	}
+	return;
+}
+
+static int __devinit msm_cci_probe(struct platform_device *pdev)
+{
+	struct cci_device *new_cci_dev;
+	int rc = 0;
+	struct msm_cam_subdev_info sd_info;
+	struct intr_table_entry irq_req;
+	CDBG("%s: pdev %p device id = %d\n", __func__, pdev, pdev->id);
+	new_cci_dev = kzalloc(sizeof(struct cci_device), GFP_KERNEL);
+	if (!new_cci_dev) {
+		CDBG("%s: not enough memory\n", __func__);
+		return -ENOMEM;
+	}
+	v4l2_subdev_init(&new_cci_dev->subdev, &msm_cci_subdev_ops);
+	new_cci_dev->subdev.internal_ops = &msm_cci_internal_ops;
+	new_cci_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+	snprintf(new_cci_dev->subdev.name,
+			ARRAY_SIZE(new_cci_dev->subdev.name), "msm_cci");
+	v4l2_set_subdevdata(&new_cci_dev->subdev, new_cci_dev);
+	platform_set_drvdata(pdev, &new_cci_dev->subdev);
+	CDBG("%s sd %p\n", __func__, &new_cci_dev->subdev);
+	if (pdev->dev.of_node)
+		of_property_read_u32((&pdev->dev)->of_node,
+			"cell-index", &pdev->id);
+
+	new_cci_dev->mem = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "cci");
+	if (!new_cci_dev->mem) {
+		CDBG("%s: no mem resource?\n", __func__);
+		rc = -ENODEV;
+		goto cci_no_resource;
+	}
+	new_cci_dev->irq = platform_get_resource_byname(pdev,
+					IORESOURCE_IRQ, "cci");
+	if (!new_cci_dev->irq) {
+		CDBG("%s: no irq resource?\n", __func__);
+		rc = -ENODEV;
+		goto cci_no_resource;
+	}
+	CDBG("%s line %d cci irq start %d end %d\n", __func__,
+		__LINE__,
+		new_cci_dev->irq->start,
+		new_cci_dev->irq->end);
+	new_cci_dev->io = request_mem_region(new_cci_dev->mem->start,
+		resource_size(new_cci_dev->mem), pdev->name);
+	if (!new_cci_dev->io) {
+		CDBG("%s: no valid mem region\n", __func__);
+		rc = -EBUSY;
+		goto cci_no_resource;
+	}
+
+	new_cci_dev->base = ioremap(new_cci_dev->mem->start,
+		resource_size(new_cci_dev->mem));
+	if (!new_cci_dev->base) {
+		rc = -ENOMEM;
+		goto cci_release_mem;
+	}
+
+	sd_info.sdev_type = CCI_DEV;
+	sd_info.sd_index = pdev->id;
+	sd_info.irq_num = new_cci_dev->irq->start;
+	msm_cam_register_subdev_node(&new_cci_dev->subdev, &sd_info);
+
+	irq_req.cam_hw_idx       = MSM_CAM_HW_CCI;
+	irq_req.dev_name         = "msm_cci";
+	irq_req.irq_idx          = CAMERA_SS_IRQ_1;
+	irq_req.irq_num          = new_cci_dev->irq->start;
+	irq_req.is_composite     = 0;
+	irq_req.irq_trigger_type = IRQF_TRIGGER_RISING;
+	irq_req.num_hwcore       = 1;
+	irq_req.subdev_list[0]   = &new_cci_dev->subdev;
+	irq_req.data             = (void *)new_cci_dev;
+	rc = msm_cam_server_request_irq(&irq_req);
+	if (rc == -ENXIO) {
+		/* IRQ Router hardware is not present on this hardware.
+		 * Request for the IRQ and register the interrupt handler. */
+		rc = request_irq(new_cci_dev->irq->start, msm_cci_irq,
+			IRQF_TRIGGER_RISING, "cci", new_cci_dev);
+		if (rc < 0) {
+			CDBG("%s: irq request fail\n", __func__);
+			rc = -EBUSY;
+			goto cci_release_mem;
+		}
+		disable_irq(new_cci_dev->irq->start);
+	} else if (rc < 0) {
+		CDBG("%s Error registering irq ", __func__);
+		rc = -EBUSY;
+		goto cci_release_mem;
+	}
+
+	CDBG("%s line %d cci irq start %d end %d\n", __func__,
+		__LINE__,
+		new_cci_dev->irq->start,
+		new_cci_dev->irq->end);
+
+	new_cci_dev->pdev = pdev;
+	msm_cci_initialize_cci_params(new_cci_dev);
+
+	CDBG("%s line %d\n", __func__, __LINE__);
+	return 0;
+
+cci_release_mem:
+	release_mem_region(new_cci_dev->mem->start,
+		resource_size(new_cci_dev->mem));
+cci_no_resource:
+	kfree(new_cci_dev);
+	return rc;
+}
+
+static int __exit msm_cci_exit(struct platform_device *pdev)
+{
+	struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+	struct cci_device *cci_dev =
+		v4l2_get_subdevdata(subdev);
+	release_mem_region(cci_dev->mem->start, resource_size(cci_dev->mem));
+	kfree(cci_dev);
+	return 0;
+}
+
+static const struct of_device_id msm_cci_dt_match[] = {
+	{.compatible = "qcom,cci"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, msm_cci_dt_match);
+
+static struct platform_driver cci_driver = {
+	.probe = msm_cci_probe,
+	.remove = msm_cci_exit,
+	.driver = {
+		.name = MSM_CCI_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cci_dt_match,
+	},
+};
+
+static int __init msm_cci_init_module(void)
+{
+	return platform_driver_register(&cci_driver);
+}
+
+static void __exit msm_cci_exit_module(void)
+{
+	platform_driver_unregister(&cci_driver);
+}
+
+module_init(msm_cci_init_module);
+module_exit(msm_cci_exit_module);
+MODULE_DESCRIPTION("MSM CCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/cci/msm_cci.h b/drivers/media/video/msm/cci/msm_cci.h
new file mode 100644
index 0000000..6955576
--- /dev/null
+++ b/drivers/media/video/msm/cci/msm_cci.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CCI_H
+#define MSM_CCI_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <mach/camera.h>
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE  1
+#define FALSE 0
+
+struct msm_camera_cci_master_info {
+	uint32_t status;
+	uint8_t reset_pending;
+	struct mutex mutex;
+	struct completion reset_complete;
+};
+
+struct cci_device {
+	struct platform_device *pdev;
+	struct v4l2_subdev subdev;
+	struct resource *mem;
+	struct resource *irq;
+	struct resource *io;
+	void __iomem *base;
+	uint32_t hw_version;
+	struct clk *cci_clk[5];
+	struct msm_camera_cci_i2c_queue_info
+		cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
+	struct msm_camera_cci_master_info cci_master_info[NUM_MASTERS];
+};
+
+enum msm_cci_i2c_cmd_type {
+	CCI_I2C_SET_PARAM_CMD = 1,
+	CCI_I2C_WAIT_CMD,
+	CCI_I2C_WAIT_SYNC_CMD,
+	CCI_I2C_WAIT_GPIO_EVENT_CMD,
+	CCI_I2C_TRIG_I2C_EVENT_CMD,
+	CCI_I2C_LOCK_CMD,
+	CCI_I2C_UNLOCK_CMD,
+	CCI_I2C_REPORT_CMD,
+	CCI_I2C_WRITE_CMD,
+	CCI_I2C_READ_CMD,
+	CCI_I2C_WRITE_DISABLE_P_CMD,
+	CCI_I2C_READ_DISABLE_P_CMD,
+	CCI_I2C_WRITE_CMD2,
+	CCI_I2C_WRITE_CMD3,
+	CCI_I2C_REPEAT_CMD,
+	CCI_I2C_INVALID_CMD,
+};
+
+enum msm_cci_gpio_cmd_type {
+	CCI_GPIO_SET_PARAM_CMD = 1,
+	CCI_GPIO_WAIT_CMD,
+	CCI_GPIO_WAIT_SYNC_CMD,
+	CCI_GPIO_WAIT_GPIO_IN_EVENT_CMD,
+	CCI_GPIO_WAIT_I2C_Q_TRIG_EVENT_CMD,
+	CCI_GPIO_OUT_CMD,
+	CCI_GPIO_TRIG_EVENT_CMD,
+	CCI_GPIO_REPORT_CMD,
+	CCI_GPIO_REPEAT_CMD,
+	CCI_GPIO_CONTINUE_CMD,
+	CCI_GPIO_INVALID_CMD,
+};
+
+#define VIDIOC_MSM_CCI_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 23, struct msm_camera_cci_ctrl *)
+
+#endif
+
diff --git a/drivers/media/video/msm/csi/Makefile b/drivers/media/video/msm/csi/Makefile
index d11e2d2..547eb13 100644
--- a/drivers/media/video/msm/csi/Makefile
+++ b/drivers/media/video/msm/csi/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_MSM_CSI2_REGISTER) += msm_csi2_register.o
 obj-$(CONFIG_MSM_CSIPHY) += msm_csiphy.o
 obj-$(CONFIG_MSM_CSID) += msm_csid.o
+obj-$(CONFIG_MSM_ISPIF) += msm_ispif.o
 obj-$(CONFIG_ARCH_MSM8960) += msm_csi2_register.o msm_csiphy.o msm_csid.o msm_ispif.o
 obj-$(CONFIG_ARCH_MSM7X27A) += msm_csic_register.o msm_csic.o
 obj-$(CONFIG_ARCH_MSM8X60) += msm_csic_register.o msm_csic.o
diff --git a/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h b/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h
new file mode 100644
index 0000000..ecc4fea2
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi2.0/msm_ispif_hwreg.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_HWREG_H
+#define MSM_ISPIF_HWREG_H
+
+
+/* ISPIF registers */
+
+#define ISPIF_RST_CMD_ADDR                        0x00
+#define ISPIF_RST_CMD_1_ADDR                      0x00
+#define ISPIF_INTF_CMD_ADDR                       0x04
+#define ISPIF_INTF_CMD_1_ADDR                     0x30
+#define ISPIF_CTRL_ADDR                           0x08
+#define ISPIF_INPUT_SEL_ADDR                      0x0C
+#define ISPIF_PIX_0_INTF_CID_MASK_ADDR            0x10
+#define ISPIF_RDI_0_INTF_CID_MASK_ADDR            0x14
+#define ISPIF_PIX_1_INTF_CID_MASK_ADDR            0x38
+#define ISPIF_RDI_1_INTF_CID_MASK_ADDR            0x3C
+#define ISPIF_RDI_2_INTF_CID_MASK_ADDR            0x44
+#define ISPIF_PIX_0_STATUS_ADDR                   0x24
+#define ISPIF_RDI_0_STATUS_ADDR                   0x28
+#define ISPIF_PIX_1_STATUS_ADDR                   0x60
+#define ISPIF_RDI_1_STATUS_ADDR                   0x64
+#define ISPIF_RDI_2_STATUS_ADDR                   0x6C
+#define ISPIF_IRQ_MASK_ADDR                     0x0100
+#define ISPIF_IRQ_CLEAR_ADDR                    0x0104
+#define ISPIF_IRQ_STATUS_ADDR                   0x0108
+#define ISPIF_IRQ_MASK_1_ADDR                   0x010C
+#define ISPIF_IRQ_CLEAR_1_ADDR                  0x0110
+#define ISPIF_IRQ_STATUS_1_ADDR                 0x0114
+#define ISPIF_IRQ_MASK_2_ADDR                   0x0118
+#define ISPIF_IRQ_CLEAR_2_ADDR                  0x011C
+#define ISPIF_IRQ_STATUS_2_ADDR                 0x0120
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR         0x0124
+
+/*ISPIF RESET BITS*/
+
+#define VFE_CLK_DOMAIN_RST           31
+#define RDI_CLK_DOMAIN_RST           30
+#define PIX_CLK_DOMAIN_RST           29
+#define AHB_CLK_DOMAIN_RST           28
+#define RDI_1_CLK_DOMAIN_RST         27
+#define RDI_2_VFE_RST_STB            19
+#define RDI_2_CSID_RST_STB           18
+#define RDI_1_VFE_RST_STB            13
+#define RDI_1_CSID_RST_STB           12
+#define RDI_0_VFE_RST_STB            7
+#define RDI_0_CSID_RST_STB           6
+#define PIX_1_VFE_RST_STB            10
+#define PIX_1_CSID_RST_STB           9
+#define PIX_0_VFE_RST_STB            4
+#define PIX_0_CSID_RST_STB           3
+#define SW_REG_RST_STB               2
+#define MISC_LOGIC_RST_STB           1
+#define STROBED_RST_EN               0
+
+#define PIX_INTF_0_OVERFLOW_IRQ      12
+#define RAW_INTF_0_OVERFLOW_IRQ      25
+#define RAW_INTF_1_OVERFLOW_IRQ      25
+#define RESET_DONE_IRQ               27
+
+#define ISPIF_IRQ_STATUS_MASK        0xA493000
+#define ISPIF_IRQ_1_STATUS_MASK      0xA493000
+#define ISPIF_IRQ_STATUS_RDI_SOF_MASK	0x492000
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD     0x1
+
+#endif
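
The *_RST_STB values above are bit positions, not masks; msm_ispif_reset() and msm_ispif_intf_reset() further down OR the selected strobes into a single word and write it to ISPIF_RST_CMD_ADDR. A minimal editorial sketch, assuming this header and <linux/types.h> are included (the helper name is invented):

	/* Compose a reset command that strobes the PIX0 and RDI0 bridges,
	 * mirroring how msm_ispif_intf_reset() builds its mask. */
	static inline uint32_t ispif_pix0_rdi0_rst_cmd(void)
	{
		return (0x1 << STROBED_RST_EN) |
			(0x1 << PIX_0_VFE_RST_STB) |
			(0x1 << PIX_0_CSID_RST_STB) |
			(0x1 << RDI_0_VFE_RST_STB) |
			(0x1 << RDI_0_CSID_RST_STB);
	}
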
diff --git a/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h b/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h
new file mode 100644
index 0000000..820adf4
--- /dev/null
+++ b/drivers/media/video/msm/csi/include/csi3.0/msm_ispif_hwreg.h
@@ -0,0 +1,91 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_HWREG_H
+#define MSM_ISPIF_HWREG_H
+
+
+/* ISPIF registers */
+
+#define ISPIF_RST_CMD_ADDR                        0x08
+#define ISPIF_RST_CMD_1_ADDR                      0x0C
+#define ISPIF_INTF_CMD_ADDR                      0x248
+#define ISPIF_INTF_CMD_1_ADDR                    0x24C
+#define ISPIF_CTRL_ADDR                           0x08
+#define ISPIF_INPUT_SEL_ADDR                     0x244
+#define ISPIF_PIX_0_INTF_CID_MASK_ADDR           0x254
+#define ISPIF_RDI_0_INTF_CID_MASK_ADDR           0x264
+#define ISPIF_PIX_1_INTF_CID_MASK_ADDR           0x258
+#define ISPIF_RDI_1_INTF_CID_MASK_ADDR           0x268
+#define ISPIF_RDI_2_INTF_CID_MASK_ADDR           0x26C
+#define ISPIF_PIX_0_STATUS_ADDR                  0x2C0
+#define ISPIF_RDI_0_STATUS_ADDR                  0x2D0
+#define ISPIF_PIX_1_STATUS_ADDR                  0x2C4
+#define ISPIF_RDI_1_STATUS_ADDR                  0x2D4
+#define ISPIF_RDI_2_STATUS_ADDR                  0x2D8
+#define ISPIF_IRQ_MASK_ADDR                      0x208
+#define ISPIF_IRQ_CLEAR_ADDR                     0x230
+#define ISPIF_IRQ_STATUS_ADDR                    0x21C
+#define ISPIF_IRQ_MASK_1_ADDR                    0x20C
+#define ISPIF_IRQ_CLEAR_1_ADDR                   0x234
+#define ISPIF_IRQ_STATUS_1_ADDR                  0x220
+#define ISPIF_IRQ_MASK_2_ADDR                    0x210
+#define ISPIF_IRQ_CLEAR_2_ADDR                   0x238
+#define ISPIF_IRQ_STATUS_2_ADDR                  0x224
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR           0x1C
+
+/* new */
+#define ISPIF_VFE_m_CTRL_0_ADDR                  0x200
+#define ISPIF_VFE_m_IRQ_MASK_0                   0x208
+#define ISPIF_VFE_m_IRQ_MASK_1                   0x20C
+#define ISPIF_VFE_m_IRQ_MASK_2                   0x210
+#define ISPIF_VFE_m_IRQ_STATUS_0                 0x21C
+#define ISPIF_VFE_m_IRQ_STATUS_1                 0x220
+#define ISPIF_VFE_m_IRQ_STATUS_2                 0x224
+#define ISPIF_VFE_m_IRQ_CLEAR_0                  0x230
+#define ISPIF_VFE_m_IRQ_CLEAR_1                  0x234
+#define ISPIF_VFE_m_IRQ_CLEAR_2                  0x238
+
+/*ISPIF RESET BITS*/
+
+#define VFE_CLK_DOMAIN_RST           31
+#define RDI_CLK_DOMAIN_RST           26
+#define RDI_1_CLK_DOMAIN_RST         27
+#define RDI_2_CLK_DOMAIN_RST         28
+#define PIX_CLK_DOMAIN_RST           29
+#define PIX_1_CLK_DOMAIN_RST         30
+#define AHB_CLK_DOMAIN_RST           25
+#define RDI_2_VFE_RST_STB            12
+#define RDI_2_CSID_RST_STB           11
+#define RDI_1_VFE_RST_STB            10
+#define RDI_1_CSID_RST_STB           9
+#define RDI_0_VFE_RST_STB            8
+#define RDI_0_CSID_RST_STB           7
+#define PIX_1_VFE_RST_STB            6
+#define PIX_1_CSID_RST_STB           5
+#define PIX_0_VFE_RST_STB            4
+#define PIX_0_CSID_RST_STB           3
+#define SW_REG_RST_STB               2
+#define MISC_LOGIC_RST_STB           1
+#define STROBED_RST_EN               0
+
+#define PIX_INTF_0_OVERFLOW_IRQ      12
+#define RAW_INTF_0_OVERFLOW_IRQ      25
+#define RAW_INTF_1_OVERFLOW_IRQ      25
+#define RESET_DONE_IRQ               27
+
+#define ISPIF_IRQ_STATUS_MASK        0xA493000
+#define ISPIF_IRQ_1_STATUS_MASK      0xA493000
+#define ISPIF_IRQ_STATUS_RDI_SOF_MASK	0x492000
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD     0x1
+
+#endif
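
Unlike the csi2.0 map, the csi3.0 register block is banked per VFE: the driver changes below add (0x200 * vfe_intf) to every interface register offset, consistent with the ISPIF_VFE_m_* group starting at 0x200 above. An editorial sketch of that address arithmetic (the 0x200 stride is inferred from the driver code in this patch, not quoted from a hardware manual):

	/* Offset of a banked ISPIF register for a given VFE instance,
	 * matching the "(0x200 * vfe_intf)" pattern used in msm_ispif.c. */
	static inline uint32_t ispif_vfe_reg_off(uint32_t offset,
		uint8_t vfe_intf)
	{
		return offset + (0x200 * vfe_intf);
	}
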
diff --git a/drivers/media/video/msm/csi/msm_ispif.c b/drivers/media/video/msm/csi/msm_ispif.c
index 1494644..b23efb5 100644
--- a/drivers/media/video/msm/csi/msm_ispif.c
+++ b/drivers/media/video/msm/csi/msm_ispif.c
@@ -14,78 +14,30 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <mach/gpio.h>
 #include <mach/camera.h>
 #include "msm_ispif.h"
 #include "msm.h"
+#include "msm_ispif_hwreg.h"
 
-#define V4L2_IDENT_ISPIF			50001
-#define CSID_VERSION_V2                      0x2000011
-
-/* ISPIF registers */
-
-#define ISPIF_RST_CMD_ADDR                        0X00
-#define ISPIF_INTF_CMD_ADDR                       0X04
-#define ISPIF_CTRL_ADDR                           0X08
-#define ISPIF_INPUT_SEL_ADDR                      0X0C
-#define ISPIF_PIX_INTF_CID_MASK_ADDR              0X10
-#define ISPIF_RDI_INTF_CID_MASK_ADDR              0X14
-#define ISPIF_PIX_1_INTF_CID_MASK_ADDR            0X38
-#define ISPIF_RDI_1_INTF_CID_MASK_ADDR            0X3C
-#define ISPIF_PIX_STATUS_ADDR                     0X24
-#define ISPIF_RDI_STATUS_ADDR                     0X28
-#define ISPIF_RDI_1_STATUS_ADDR                   0X64
-#define ISPIF_IRQ_MASK_ADDR                     0X0100
-#define ISPIF_IRQ_CLEAR_ADDR                    0X0104
-#define ISPIF_IRQ_STATUS_ADDR                   0X0108
-#define ISPIF_IRQ_MASK_1_ADDR                   0X010C
-#define ISPIF_IRQ_CLEAR_1_ADDR                  0X0110
-#define ISPIF_IRQ_STATUS_1_ADDR                 0X0114
-#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR         0x0124
-
-/*ISPIF RESET BITS*/
-
-#define VFE_CLK_DOMAIN_RST           31
-#define RDI_CLK_DOMAIN_RST           30
-#define PIX_CLK_DOMAIN_RST           29
-#define AHB_CLK_DOMAIN_RST           28
-#define RDI_1_CLK_DOMAIN_RST         27
-#define RDI_1_VFE_RST_STB            13
-#define RDI_1_CSID_RST_STB           12
-#define RDI_VFE_RST_STB              7
-#define RDI_CSID_RST_STB             6
-#define PIX_VFE_RST_STB              4
-#define PIX_CSID_RST_STB             3
-#define SW_REG_RST_STB               2
-#define MISC_LOGIC_RST_STB           1
-#define STROBED_RST_EN               0
-
-#define PIX_INTF_0_OVERFLOW_IRQ      12
-#define RAW_INTF_0_OVERFLOW_IRQ      25
-#define RAW_INTF_1_OVERFLOW_IRQ      25
-#define RESET_DONE_IRQ               27
-
-#define ISPIF_IRQ_STATUS_MASK        0xA493000
-#define ISPIF_IRQ_1_STATUS_MASK      0xA493000
-#define ISPIF_IRQ_STATUS_RDI_SOF_MASK	0x492000
-#define ISPIF_IRQ_GLOBAL_CLEAR_CMD     0x1
-
+#define V4L2_IDENT_ISPIF                     50001
+#define CSID_VERSION_V2                      0x02000011
+#define CSID_VERSION_V3                      0x30000000
 #define MAX_CID 15
 
+static atomic_t ispif_irq_cnt;
+static spinlock_t ispif_tasklet_lock;
+static struct list_head ispif_tasklet_q;
 
-static struct ispif_device *ispif;
-atomic_t ispif_irq_cnt;
-spinlock_t  ispif_tasklet_lock;
-struct list_head ispif_tasklet_q;
-
-static uint32_t global_intf_cmd_mask = 0xFFFFFFFF;
-
-
-static int msm_ispif_intf_reset(uint8_t intfmask)
+static int msm_ispif_intf_reset(struct ispif_device *ispif,
+	uint16_t intfmask, uint8_t vfe_intf)
 {
 	int rc = 0;
-	uint32_t data = 0x1;
-	uint8_t intfnum = 0, mask = intfmask;
+	uint32_t data = (0x1 << STROBED_RST_EN);
+	uint32_t data1 = (0x1 << STROBED_RST_EN);
+	uint16_t intfnum = 0, mask = intfmask;
+
 	while (mask != 0) {
 		if (!(intfmask & (0x1 << intfnum))) {
 			mask >>= 1;
@@ -94,21 +46,48 @@
 		}
 		switch (intfnum) {
 		case PIX0:
-			data = (0x1 << STROBED_RST_EN) +
-				(0x1 << PIX_VFE_RST_STB) +
-				(0x1 << PIX_CSID_RST_STB);
+			if (vfe_intf == VFE0)
+				data |= (0x1 << PIX_0_VFE_RST_STB) |
+					(0x1 << PIX_0_CSID_RST_STB);
+			else
+				data1 |= (0x1 << PIX_0_VFE_RST_STB) |
+					(0x1 << PIX_0_CSID_RST_STB);
 			break;
 
 		case RDI0:
-			data = (0x1 << STROBED_RST_EN) +
-				(0x1 << RDI_VFE_RST_STB)  +
-				(0x1 << RDI_CSID_RST_STB);
+			if (vfe_intf == VFE0)
+				data |= (0x1 << RDI_0_VFE_RST_STB) |
+					(0x1 << RDI_0_CSID_RST_STB);
+			else
+				data1 |= (0x1 << RDI_0_VFE_RST_STB) |
+					(0x1 << RDI_0_CSID_RST_STB);
+			break;
+
+		case PIX1:
+			if (vfe_intf == VFE0)
+				data |= (0x1 << PIX_1_VFE_RST_STB) |
+					(0x1 << PIX_1_CSID_RST_STB);
+			else
+				data1 |= (0x1 << PIX_1_VFE_RST_STB) |
+					(0x1 << PIX_1_CSID_RST_STB);
 			break;
 
 		case RDI1:
-			data = (0x1 << STROBED_RST_EN) +
-				(0x1 << RDI_1_VFE_RST_STB) +
-				(0x1 << RDI_1_CSID_RST_STB);
+			if (vfe_intf == VFE0)
+				data |= (0x1 << RDI_1_VFE_RST_STB) |
+					(0x1 << RDI_1_CSID_RST_STB);
+			else
+				data1 |= (0x1 << RDI_1_VFE_RST_STB) |
+					(0x1 << RDI_1_CSID_RST_STB);
+			break;
+
+		case RDI2:
+			if (vfe_intf == VFE0)
+				data |= (0x1 << RDI_2_VFE_RST_STB) |
+					(0x1 << RDI_2_CSID_RST_STB);
+			else
+				data1 |= (0x1 << RDI_2_VFE_RST_STB) |
+					(0x1 << RDI_2_CSID_RST_STB);
 			break;
 
 		default:
@@ -118,27 +97,44 @@
 		mask >>= 1;
 		intfnum++;
 	}	/*end while */
-	if (rc >= 0) {
-		msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
-		rc = wait_for_completion_interruptible(&ispif->reset_complete);
+	msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
+	rc = wait_for_completion_interruptible(&ispif->reset_complete);
+	if (ispif->csid_version >= CSID_VERSION_V3 && data1 > 0x1) {
+		msm_camera_io_w(data1,
+			ispif->base + ISPIF_RST_CMD_1_ADDR);
+		rc = wait_for_completion_interruptible(&ispif->
+			reset_complete);
 	}
 
 	return rc;
 }
 
-static int msm_ispif_reset(void)
+static int msm_ispif_reset(struct ispif_device *ispif)
 {
-	uint32_t data = (0x1 << STROBED_RST_EN) +
-		(0x1 << SW_REG_RST_STB) +
-		(0x1 << MISC_LOGIC_RST_STB) +
-		(0x1 << PIX_VFE_RST_STB) +
-		(0x1 << PIX_CSID_RST_STB) +
-		(0x1 << RDI_VFE_RST_STB) +
-		(0x1 << RDI_CSID_RST_STB) +
-		(0x1 << RDI_1_VFE_RST_STB) +
+	int rc = 0;
+	uint32_t data = (0x1 << STROBED_RST_EN) |
+		(0x1 << SW_REG_RST_STB) |
+		(0x1 << MISC_LOGIC_RST_STB) |
+		(0x1 << PIX_0_VFE_RST_STB) |
+		(0x1 << PIX_0_CSID_RST_STB) |
+		(0x1 << RDI_0_VFE_RST_STB) |
+		(0x1 << RDI_0_CSID_RST_STB) |
+		(0x1 << RDI_1_VFE_RST_STB) |
 		(0x1 << RDI_1_CSID_RST_STB);
+
+	if (ispif->csid_version >= CSID_VERSION_V2)
+		data |= (0x1 << PIX_1_VFE_RST_STB) |
+			(0x1 << PIX_1_CSID_RST_STB) |
+			(0x1 << RDI_2_VFE_RST_STB) |
+			(0x1 << RDI_2_CSID_RST_STB);
+	return 0;
 	msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_ADDR);
-	return wait_for_completion_interruptible(&ispif->reset_complete);
+	rc = wait_for_completion_interruptible(&ispif->reset_complete);
+	if (ispif->csid_version >= CSID_VERSION_V3) {
+		msm_camera_io_w(data, ispif->base + ISPIF_RST_CMD_1_ADDR);
+		rc = wait_for_completion_interruptible(&ispif->reset_complete);
+	}
+	return rc;
 }
 
 static int msm_ispif_subdev_g_chip_ident(struct v4l2_subdev *sd,
@@ -150,10 +146,12 @@
 	return 0;
 }
 
-static void msm_ispif_sel_csid_core(uint8_t intftype, uint8_t csid)
+static void msm_ispif_sel_csid_core(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
 {
 	int rc = 0;
 	uint32_t data;
+
 	if (ispif->ispif_clk[intftype] == NULL) {
 		pr_err("%s: ispif NULL clk\n", __func__);
 		return;
@@ -163,62 +161,156 @@
 	if (rc < 0)
 		pr_err("%s: clk_set_rate failed %d\n", __func__, rc);
 
-	data = msm_camera_io_r(ispif->base + ISPIF_INPUT_SEL_ADDR);
-	data |= csid<<(intftype*4);
-	msm_camera_io_w(data, ispif->base + ISPIF_INPUT_SEL_ADDR);
+	data = msm_camera_io_r(ispif->base + ISPIF_INPUT_SEL_ADDR +
+		(0x200 * vfe_intf));
+	switch (intftype) {
+	case PIX0:
+		data |= csid;
+		break;
+
+	case RDI0:
+		data |= (csid << 4);
+		break;
+
+	case PIX1:
+		data |= (csid << 8);
+		break;
+
+	case RDI1:
+		data |= (csid << 12);
+		break;
+
+	case RDI2:
+		data |= (csid << 20);
+		break;
+	}
+	msm_camera_io_w(data, ispif->base + ISPIF_INPUT_SEL_ADDR +
+		(0x200 * vfe_intf));
 }
 
-static void msm_ispif_enable_intf_cids(uint8_t intftype, uint16_t cid_mask)
+static void msm_ispif_enable_intf_cids(struct ispif_device *ispif,
+	uint8_t intftype, uint16_t cid_mask, uint8_t vfe_intf)
 {
 	uint32_t data;
 	mutex_lock(&ispif->mutex);
 	switch (intftype) {
 	case PIX0:
 		data = msm_camera_io_r(ispif->base +
-				ISPIF_PIX_INTF_CID_MASK_ADDR);
+			ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		data |= cid_mask;
 		msm_camera_io_w(data, ispif->base +
-				ISPIF_PIX_INTF_CID_MASK_ADDR);
+			ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 
 	case RDI0:
 		data = msm_camera_io_r(ispif->base +
-				ISPIF_RDI_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		data |= cid_mask;
 		msm_camera_io_w(data, ispif->base +
-				ISPIF_RDI_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case PIX1:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		data |= cid_mask;
+		msm_camera_io_w(data, ispif->base +
+			ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 
 	case RDI1:
 		data = msm_camera_io_r(ispif->base +
-				ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		data |= cid_mask;
 		msm_camera_io_w(data, ispif->base +
-				ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case RDI2:
+		data = msm_camera_io_r(ispif->base +
+			ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		data |= cid_mask;
+		msm_camera_io_w(data, ispif->base +
+			ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 	}
 	mutex_unlock(&ispif->mutex);
 }
 
-static int msm_ispif_config(struct msm_ispif_params_list *params_list)
+static int32_t msm_ispif_validate_intf_status(struct ispif_device *ispif,
+	uint8_t intftype, uint8_t vfe_intf)
+{
+	int32_t rc = 0;
+	uint32_t data;
+	mutex_lock(&ispif->mutex);
+	switch (intftype) {
+	case PIX0:
+		data = msm_camera_io_r(ispif->base +
+				ISPIF_PIX_0_STATUS_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case RDI0:
+		data = msm_camera_io_r(ispif->base +
+				ISPIF_RDI_0_STATUS_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case PIX1:
+		data = msm_camera_io_r(ispif->base +
+				ISPIF_PIX_1_STATUS_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case RDI1:
+		data = msm_camera_io_r(ispif->base +
+				ISPIF_RDI_1_STATUS_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case RDI2:
+		data = msm_camera_io_r(ispif->base +
+				ISPIF_RDI_2_STATUS_ADDR + (0x200 * vfe_intf));
+		break;
+	}
+	if ((data & 0xf) != 0xf)
+		rc = -EBUSY;
+	mutex_unlock(&ispif->mutex);
+	return rc;
+}
+
+static int msm_ispif_config(struct ispif_device *ispif,
+	struct msm_ispif_params_list *params_list)
 {
 	uint32_t params_len;
 	struct msm_ispif_params *ispif_params;
-	uint32_t data, data1;
 	int rc = 0, i = 0;
+	uint8_t intftype;
+	uint8_t vfe_intf;
 	params_len = params_list->len;
 	ispif_params = params_list->params;
 	CDBG("Enable interface\n");
-	data = msm_camera_io_r(ispif->base + ISPIF_PIX_STATUS_ADDR);
-	data1 = msm_camera_io_r(ispif->base + ISPIF_RDI_STATUS_ADDR);
-	if (((data & 0xf) != 0xf) || ((data1 & 0xf) != 0xf))
-		return -EBUSY;
 	msm_camera_io_w(0x00000000, ispif->base + ISPIF_IRQ_MASK_ADDR);
 	for (i = 0; i < params_len; i++) {
-		msm_ispif_sel_csid_core(ispif_params[i].intftype,
-			ispif_params[i].csid);
-		msm_ispif_enable_intf_cids(ispif_params[i].intftype,
-			ispif_params[i].cid_mask);
+		intftype = ispif_params[i].intftype;
+		vfe_intf = ispif_params[i].vfe_intf;
+		CDBG("%s intftype %x, vfe_intf %d\n", __func__, intftype,
+			vfe_intf);
+		if ((intftype >= INTF_MAX) ||
+			(ispif->csid_version <= CSID_VERSION_V2 &&
+			vfe_intf > VFE0) ||
+			(ispif->csid_version == CSID_VERSION_V3 &&
+			vfe_intf >= VFE_MAX)) {
+			pr_err("%s: intftype / vfe intf not valid\n",
+				__func__);
+			return -EINVAL;
+		}
+
+		rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf);
+		if (rc < 0) {
+			pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+			return rc;
+		}
+		msm_ispif_sel_csid_core(ispif, intftype, ispif_params[i].csid,
+			vfe_intf);
+		msm_ispif_enable_intf_cids(ispif, intftype,
+			ispif_params[i].cid_mask, vfe_intf);
 	}
 
 	msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
@@ -230,23 +322,34 @@
 	return rc;
 }
 
-static uint32_t msm_ispif_get_cid_mask(uint8_t intftype)
+static uint32_t msm_ispif_get_cid_mask(struct ispif_device *ispif,
+	uint16_t intftype, uint8_t vfe_intf)
 {
 	uint32_t mask = 0;
 	switch (intftype) {
 	case PIX0:
 		mask = msm_camera_io_r(ispif->base +
-			ISPIF_PIX_INTF_CID_MASK_ADDR);
+			ISPIF_PIX_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 
 	case RDI0:
 		mask = msm_camera_io_r(ispif->base +
-			ISPIF_RDI_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_0_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case PIX1:
+		mask = msm_camera_io_r(ispif->base +
+			ISPIF_PIX_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 
 	case RDI1:
 		mask = msm_camera_io_r(ispif->base +
-			ISPIF_RDI_1_INTF_CID_MASK_ADDR);
+			ISPIF_RDI_1_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
+		break;
+
+	case RDI2:
+		mask = msm_camera_io_r(ispif->base +
+			ISPIF_RDI_2_INTF_CID_MASK_ADDR + (0x200 * vfe_intf));
 		break;
 
 	default:
@@ -255,12 +358,14 @@
 	return mask;
 }
 
-static void
-msm_ispif_intf_cmd(uint8_t intfmask, uint8_t intf_cmd_mask)
+static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint16_t intfmask,
+	uint8_t intf_cmd_mask, uint8_t vfe_intf)
 {
-	uint8_t vc = 0, val = 0;
-	uint8_t mask = intfmask, intfnum = 0;
+	uint8_t vc = 0;
+	uint16_t mask = intfmask, intfnum = 0;
 	uint32_t cid_mask = 0;
+	uint32_t global_intf_cmd_mask = 0xFFFFFFFF;
+	uint32_t global_intf_cmd_mask1 = 0xFFFFFFFF;
 	while (mask != 0) {
 		if (!(intfmask & (0x1 << intfnum))) {
 			mask >>= 1;
@@ -268,17 +373,20 @@
 			continue;
 		}
 
-		cid_mask = msm_ispif_get_cid_mask(intfnum);
+		cid_mask = msm_ispif_get_cid_mask(ispif, intfnum, vfe_intf);
 		vc = 0;
 
 		while (cid_mask != 0) {
 			if ((cid_mask & 0xf) != 0x0) {
-				val = (intf_cmd_mask>>(vc*2)) & 0x3;
-				global_intf_cmd_mask |=
-					(0x3 << ((vc * 2) + (intfnum * 8)));
-				global_intf_cmd_mask &= ~((0x3 & ~val)
-					<< ((vc * 2) +
-					(intfnum * 8)));
+				if (intfnum != RDI2)
+					global_intf_cmd_mask &=
+						~((0x3 & ~intf_cmd_mask)
+						<< ((vc * 2) +
+						(intfnum * 8)));
+				else
+					global_intf_cmd_mask1 &=
+						~((0x3 & ~intf_cmd_mask)
+						<< ((vc * 2) + 8));
 			}
 			vc++;
 			cid_mask >>= 4;
@@ -287,50 +395,57 @@
 		intfnum++;
 	}
 	msm_camera_io_w(global_intf_cmd_mask,
-		ispif->base + ISPIF_INTF_CMD_ADDR);
+		ispif->base + ISPIF_INTF_CMD_ADDR + (0x200 * vfe_intf));
+	if (global_intf_cmd_mask1 != 0xFFFFFFFF)
+		msm_camera_io_w(global_intf_cmd_mask1,
+			ispif->base + ISPIF_INTF_CMD_1_ADDR +
+			(0x200 * vfe_intf));
 }
 
-static int msm_ispif_abort_intf_transfer(uint8_t intfmask)
+static int msm_ispif_abort_intf_transfer(struct ispif_device *ispif,
+	uint16_t intfmask, uint8_t vfe_intf)
 {
 	int rc = 0;
-	uint8_t intf_cmd_mask = 0xAA;
-	uint8_t intfnum = 0, mask = intfmask;
+	uint8_t intf_cmd_mask = 0x02;
 	mutex_lock(&ispif->mutex);
-	msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
-	while (mask != 0) {
-		if (intfmask & (0x1 << intfnum))
-			global_intf_cmd_mask |= (0xFF << (intfnum * 8));
-		mask >>= 1;
-		intfnum++;
-	}
+	CDBG("%s intfmask %x intf_cmd_mask %x\n", __func__, intfmask,
+		intf_cmd_mask);
+	msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf);
 	mutex_unlock(&ispif->mutex);
 	return rc;
 }
 
-static int msm_ispif_start_intf_transfer(uint8_t intfmask)
+static int msm_ispif_start_intf_transfer(struct ispif_device *ispif,
+	uint16_t intfmask, uint8_t vfe_intf)
 {
-	uint8_t intf_cmd_mask = 0x55;
+	uint8_t intf_cmd_mask = 0x01;
 	int rc = 0;
 	mutex_lock(&ispif->mutex);
-	rc = msm_ispif_intf_reset(intfmask);
-	msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
+	rc = msm_ispif_intf_reset(ispif, intfmask, vfe_intf);
+	CDBG("%s intfmask %x intf_cmd_mask %x\n", __func__, intfmask,
+		intf_cmd_mask);
+	msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf);
 	mutex_unlock(&ispif->mutex);
 	return rc;
 }
 
-static int msm_ispif_stop_intf_transfer(uint8_t intfmask)
+static int msm_ispif_stop_intf_transfer(struct ispif_device *ispif,
+	uint16_t intfmask, uint8_t vfe_intf)
 {
 	int rc = 0;
 	uint8_t intf_cmd_mask = 0x00;
-	uint8_t intfnum = 0, mask = intfmask;
+	uint16_t intfnum = 0, mask = intfmask;
 	mutex_lock(&ispif->mutex);
-	msm_ispif_intf_cmd(intfmask, intf_cmd_mask);
+	CDBG("%s intfmask %x intf_cmd_mask %x\n", __func__, intfmask,
+		intf_cmd_mask);
+	msm_ispif_intf_cmd(ispif, intfmask, intf_cmd_mask, vfe_intf);
 	while (mask != 0) {
 		if (intfmask & (0x1 << intfnum)) {
 			switch (intfnum) {
 			case PIX0:
 				while ((msm_camera_io_r(ispif->base +
-					ISPIF_PIX_STATUS_ADDR)
+					ISPIF_PIX_0_STATUS_ADDR +
+					(0x200 * vfe_intf))
 					& 0xf) != 0xf) {
 					CDBG("Wait for pix0 Idle\n");
 				}
@@ -338,24 +453,43 @@
 
 			case RDI0:
 				while ((msm_camera_io_r(ispif->base +
-					ISPIF_RDI_STATUS_ADDR)
+					ISPIF_RDI_0_STATUS_ADDR +
+					(0x200 * vfe_intf))
 					& 0xf) != 0xf) {
 					CDBG("Wait for rdi0 Idle\n");
 				}
 				break;
 
+			case PIX1:
+				while ((msm_camera_io_r(ispif->base +
+					ISPIF_PIX_1_STATUS_ADDR +
+					(0x200 * vfe_intf))
+					& 0xf) != 0xf) {
+					CDBG("Wait for pix1 Idle\n");
+				}
+				break;
+
 			case RDI1:
 				while ((msm_camera_io_r(ispif->base +
-					ISPIF_RDI_1_STATUS_ADDR)
+					ISPIF_RDI_1_STATUS_ADDR +
+					(0x200 * vfe_intf))
 					& 0xf) != 0xf) {
 					CDBG("Wait for rdi1 Idle\n");
 				}
 				break;
 
+			case RDI2:
+				while ((msm_camera_io_r(ispif->base +
+					ISPIF_RDI_2_STATUS_ADDR +
+					(0x200 * vfe_intf))
+					& 0xf) != 0xf) {
+					CDBG("Wait for rdi2 Idle\n");
+				}
+				break;
+
 			default:
 				break;
 			}
-			global_intf_cmd_mask |= (0xFF << (intfnum * 8));
 		}
 		mask >>= 1;
 		intfnum++;
@@ -364,24 +498,33 @@
 	return rc;
 }
 
-static int msm_ispif_subdev_video_s_stream(struct v4l2_subdev *sd, int enable)
+static int msm_ispif_subdev_video_s_stream(struct v4l2_subdev *sd,
+	int enable)
 {
 	struct ispif_device *ispif =
 			(struct ispif_device *)v4l2_get_subdevdata(sd);
-	int32_t cmd = enable & ((1<<ISPIF_S_STREAM_SHIFT)-1);
-	enum msm_ispif_intftype intf = enable >> ISPIF_S_STREAM_SHIFT;
+	uint32_t cmd = enable & ((1<<ISPIF_S_STREAM_SHIFT)-1);
+	uint16_t intf = enable >> ISPIF_S_STREAM_SHIFT;
+	uint8_t vfe_intf = enable >> ISPIF_VFE_INTF_SHIFT;
 	int rc = -EINVAL;
-
+	CDBG("%s enable %x, cmd %x, intf %x\n", __func__, enable, cmd, intf);
 	BUG_ON(!ispif);
+	if ((ispif->csid_version <= CSID_VERSION_V2 && vfe_intf > VFE0) ||
+		(ispif->csid_version == CSID_VERSION_V3 &&
+		vfe_intf >= VFE_MAX)) {
+		pr_err("%s invalid csid version %x && vfe intf %d\n", __func__,
+			ispif->csid_version, vfe_intf);
+		return rc;
+	}
 	switch (cmd) {
 	case ISPIF_ON_FRAME_BOUNDARY:
-		rc = msm_ispif_start_intf_transfer(intf);
+		rc = msm_ispif_start_intf_transfer(ispif, intf, vfe_intf);
 		break;
 	case ISPIF_OFF_FRAME_BOUNDARY:
-		rc = msm_ispif_stop_intf_transfer(intf);
+		rc = msm_ispif_stop_intf_transfer(ispif, intf, vfe_intf);
 		break;
 	case ISPIF_OFF_IMMEDIATELY:
-		rc = msm_ispif_abort_intf_transfer(intf);
+		rc = msm_ispif_abort_intf_transfer(ispif, intf, vfe_intf);
 		break;
 	default:
 		break;
@@ -449,8 +592,10 @@
 	return;
 }
 
-static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out)
+static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out,
+	void *data)
 {
+	struct ispif_device *ispif = (struct ispif_device *)data;
 	out->ispifIrqStatus0 = msm_camera_io_r(ispif->base +
 		ISPIF_IRQ_STATUS_ADDR);
 	out->ispifIrqStatus1 = msm_camera_io_r(ispif->base +
@@ -482,7 +627,7 @@
 static irqreturn_t msm_io_ispif_irq(int irq_num, void *data)
 {
 	struct ispif_irq_status irq;
-	msm_ispif_read_irq_status(&irq);
+	msm_ispif_read_irq_status(&irq, data);
 	return IRQ_HANDLED;
 }
 
@@ -494,19 +639,20 @@
 	{"csi_rdi2_clk", 0},
 };
 
-static int msm_ispif_init(const uint32_t *csid_version)
+static int msm_ispif_init(struct ispif_device *ispif,
+	const uint32_t *csid_version)
 {
 	int rc = 0;
+	CDBG("%s called %d\n", __func__, __LINE__);
 	spin_lock_init(&ispif_tasklet_lock);
 	INIT_LIST_HEAD(&ispif_tasklet_q);
 	rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
-		IRQF_TRIGGER_RISING, "ispif", 0);
+		IRQF_TRIGGER_RISING, "ispif", ispif);
 
-	global_intf_cmd_mask = 0xFFFFFFFF;
 	init_completion(&ispif->reset_complete);
 
 	ispif->csid_version = *csid_version;
-	if (ispif->csid_version == CSID_VERSION_V2) {
+	if (ispif->csid_version >= CSID_VERSION_V2) {
 		rc = msm_cam_clk_enable(&ispif->pdev->dev, ispif_clk_info,
 			ispif->ispif_clk, ARRAY_SIZE(ispif_clk_info), 1);
 		if (rc < 0)
@@ -517,15 +663,12 @@
 		if (rc < 0)
 			return rc;
 	}
-	rc = msm_ispif_reset();
+	rc = msm_ispif_reset(ispif);
 	return rc;
 }
 
-static void msm_ispif_release(struct v4l2_subdev *sd)
+static void msm_ispif_release(struct ispif_device *ispif)
 {
-	struct ispif_device *ispif =
-			(struct ispif_device *)v4l2_get_subdevdata(sd);
-
 	CDBG("%s, free_irq\n", __func__);
 	free_irq(ispif->irq->start, 0);
 	tasklet_kill(&ispif_tasklet);
@@ -538,43 +681,12 @@
 			ispif->ispif_clk, 2, 0);
 }
 
-void msm_ispif_vfe_get_cid(uint8_t intftype, char *cids, int *num)
-{
-	uint32_t data = 0;
-	int i = 0, j = 0;
-	switch (intftype) {
-	case PIX0:
-		data = msm_camera_io_r(ispif->base +
-			ISPIF_PIX_INTF_CID_MASK_ADDR);
-		break;
-
-	case RDI0:
-		data = msm_camera_io_r(ispif->base +
-			ISPIF_RDI_INTF_CID_MASK_ADDR);
-		break;
-
-	case RDI1:
-		data = msm_camera_io_r(ispif->base +
-			ISPIF_RDI_1_INTF_CID_MASK_ADDR);
-		break;
-
-	default:
-		break;
-	}
-	for (i = 0; i <= MAX_CID; i++) {
-		if ((data & 0x1) == 0x1) {
-			cids[j++] = i;
-			(*num)++;
-		}
-		data >>= 1;
-	}
-}
-
 static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
 {
 	long rc = 0;
 	struct ispif_cfg_data cdata;
-
+	struct ispif_device *ispif =
+		(struct ispif_device *)v4l2_get_subdevdata(sd);
 	if (copy_from_user(&cdata, (void *)arg, sizeof(struct ispif_cfg_data)))
 		return -EFAULT;
 	CDBG("%s cfgtype = %d\n", __func__, cdata.cfgtype);
@@ -582,7 +694,7 @@
 	case ISPIF_INIT:
 		CDBG("%s csid_version = %x\n", __func__,
 			cdata.cfg.csid_version);
-		rc = msm_ispif_init(&cdata.cfg.csid_version);
+		rc = msm_ispif_init(ispif, &cdata.cfg.csid_version);
 		break;
 	case ISPIF_SET_CFG:
 		CDBG("%s len = %d, intftype = %d,.cid_mask = %d, csid = %d\n",
@@ -591,7 +703,7 @@
 			cdata.cfg.ispif_params.params[0].intftype,
 			cdata.cfg.ispif_params.params[0].cid_mask,
 			cdata.cfg.ispif_params.params[0].csid);
-		rc = msm_ispif_config(&cdata.cfg.ispif_params);
+		rc = msm_ispif_config(ispif, &cdata.cfg.ispif_params);
 		break;
 
 	case ISPIF_SET_ON_FRAME_BOUNDARY:
@@ -600,7 +712,7 @@
 		rc = msm_ispif_subdev_video_s_stream(sd, cdata.cfg.cmd);
 		break;
 	case ISPIF_RELEASE:
-		msm_ispif_release(sd);
+		msm_ispif_release(ispif);
 		break;
 	default:
 		break;
@@ -640,6 +752,7 @@
 {
 	int rc = 0;
 	struct msm_cam_subdev_info sd_info;
+	struct ispif_device *ispif;
 
 	CDBG("%s\n", __func__);
 	ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
@@ -659,6 +772,10 @@
 								"ispif");
 	mutex_init(&ispif->mutex);
 
+	if (pdev->dev.of_node)
+		of_property_read_u32((&pdev->dev)->of_node,
+			"cell-index", &pdev->id);
+
 	ispif->mem = platform_get_resource_byname(pdev,
 					IORESOURCE_MEM, "ispif");
 	if (!ispif->mem) {
@@ -709,11 +826,18 @@
 	return rc;
 }
 
+static const struct of_device_id msm_ispif_dt_match[] = {
+	{.compatible = "qcom,ispif"},
+};
+
+MODULE_DEVICE_TABLE(of, msm_ispif_dt_match);
+
 static struct platform_driver ispif_driver = {
 	.probe = ispif_probe,
 	.driver = {
 		.name = MSM_ISPIF_DRV_NAME,
 		.owner = THIS_MODULE,
+		.of_match_table = msm_ispif_dt_match,
 	},
 };
 
diff --git a/drivers/media/video/msm/csi/msm_ispif.h b/drivers/media/video/msm/csi/msm_ispif.h
index 7b301ba..df93a44 100644
--- a/drivers/media/video/msm/csi/msm_ispif.h
+++ b/drivers/media/video/msm/csi/msm_ispif.h
@@ -45,6 +45,4 @@
 #define VIDIOC_MSM_ISPIF_CFG \
 	_IOWR('V', BASE_VIDIOC_PRIVATE + 18, struct ispif_cfg_data*)
 
-void msm_ispif_vfe_get_cid(uint8_t intftype, char *cids, int *num);
-
 #endif
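
As the msm_ispif.c changes above show, the s_stream 'enable' argument now carries three packed fields: the command in the bits below ISPIF_S_STREAM_SHIFT, the interface mask starting at ISPIF_S_STREAM_SHIFT, and the VFE selector starting at ISPIF_VFE_INTF_SHIFT. A hedged sketch of the matching encode step (both shift macros are assumed to be provided by this header or its includes; their numeric values are not visible in this patch, and callers must keep each field inside its window for the decode to round-trip):

	/* Pack cmd / interface mask / VFE the way
	 * msm_ispif_subdev_video_s_stream() unpacks them. */
	static inline uint32_t ispif_pack_s_stream(uint32_t cmd,
		uint16_t intfmask, uint8_t vfe_intf)
	{
		return cmd |
			((uint32_t)intfmask << ISPIF_S_STREAM_SHIFT) |
			((uint32_t)vfe_intf << ISPIF_VFE_INTF_SHIFT);
	}
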
diff --git a/drivers/media/video/msm/io/Makefile b/drivers/media/video/msm/io/Makefile
index 611eecd..fdff226 100644
--- a/drivers/media/video/msm/io/Makefile
+++ b/drivers/media/video/msm/io/Makefile
@@ -1,6 +1,6 @@
 GCC_VERSION      := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
 
-EXTRA_CFLAGS += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/video/msm -Idrivers/media/video/msm/cci
 obj-$(CONFIG_MSM_CAMERA)   += msm_camera_io_util.o msm_camera_i2c.o
 ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
   obj-$(CONFIG_MSM_CAMERA) += msm_camera_i2c_mux.o
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.c b/drivers/media/video/msm/io/msm_camera_i2c.c
index e946569..82bca02 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.c
+++ b/drivers/media/video/msm/io/msm_camera_i2c.c
@@ -10,7 +10,10 @@
  * GNU General Public License for more details.
  */
 
+#include <mach/camera.h>
 #include "msm_camera_i2c.h"
+#include "msm.h"
+#include "msm_cci.h"
 
 int32_t msm_camera_i2c_rxdata(struct msm_camera_i2c_client *dev_client,
 	unsigned char *rxdata, int data_length)
@@ -63,6 +66,7 @@
 	int32_t rc = -EFAULT;
 	unsigned char buf[client->addr_type+data_type];
 	uint8_t len = 0;
+
 	if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
 		&& client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
 		|| (data_type != MSM_CAMERA_I2C_BYTE_DATA
@@ -71,33 +75,51 @@
 
 	S_I2C_DBG("%s reg addr = 0x%x data type: %d\n",
 			  __func__, addr, data_type);
-	if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
-		buf[0] = addr;
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
-		len = 1;
-	} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
-		buf[0] = addr >> BITS_PER_BYTE;
-		buf[1] = addr;
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len+1, buf[len+1]);
-		len = 2;
+	if (client->cci_client) {
+		struct msm_camera_cci_ctrl cci_ctrl;
+		struct msm_camera_i2c_reg_conf reg_conf_tbl;
+		reg_conf_tbl.reg_addr = addr;
+		reg_conf_tbl.reg_data = data;
+		cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+		cci_ctrl.cci_info = client->cci_client;
+		cci_ctrl.cfg.cci_i2c_write_cfg.reg_conf_tbl = &reg_conf_tbl;
+		cci_ctrl.cfg.cci_i2c_write_cfg.data_type = data_type;
+		cci_ctrl.cfg.cci_i2c_write_cfg.size = 1;
+		rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+				core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+		CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		rc = cci_ctrl.status;
+	} else {
+		if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+			buf[0] = addr;
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len, buf[len]);
+			len = 1;
+		} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+			buf[0] = addr >> BITS_PER_BYTE;
+			buf[1] = addr;
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len, buf[len]);
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len+1, buf[len+1]);
+			len = 2;
+		}
+		S_I2C_DBG("Data: 0x%x\n", data);
+		if (data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+			buf[len] = data;
+			S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
+			len += 1;
+		} else if (data_type == MSM_CAMERA_I2C_WORD_DATA) {
+			buf[len] = data >> BITS_PER_BYTE;
+			buf[len+1] = data;
+			S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
+			S_I2C_DBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+			len += 2;
+		}
+		rc = msm_camera_i2c_txdata(client, buf, len);
+		if (rc < 0)
+			S_I2C_DBG("%s fail\n", __func__);
 	}
-	S_I2C_DBG("Data: 0x%x\n", data);
-	if (data_type == MSM_CAMERA_I2C_BYTE_DATA) {
-		buf[len] = data;
-		S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
-		len += 1;
-	} else if (data_type == MSM_CAMERA_I2C_WORD_DATA) {
-		buf[len] = data >> BITS_PER_BYTE;
-		buf[len+1] = data;
-		S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
-		S_I2C_DBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
-		len += 2;
-	}
-
-	rc = msm_camera_i2c_txdata(client, buf, len);
-	if (rc < 0)
-		S_I2C_DBG("%s fail\n", __func__);
 	return rc;
 }
 
@@ -115,26 +137,44 @@
 
 	S_I2C_DBG("%s reg addr = 0x%x num bytes: %d\n",
 			  __func__, addr, num_byte);
-	if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
-		buf[0] = addr;
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
-		len = 1;
-	} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
-		buf[0] = addr >> BITS_PER_BYTE;
-		buf[1] = addr;
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len, buf[len]);
-		S_I2C_DBG("%s byte %d: 0x%x\n", __func__, len+1, buf[len+1]);
-		len = 2;
+	if (client->cci_client) {
+		struct msm_camera_cci_ctrl cci_ctrl;
+		struct msm_camera_i2c_reg_conf reg_conf_tbl[num_byte];
+		reg_conf_tbl[0].reg_addr = addr;
+		for (i = 0; i < num_byte; i++)
+			reg_conf_tbl[i].reg_data = data[i];
+		cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+		cci_ctrl.cci_info = client->cci_client;
+		cci_ctrl.cfg.cci_i2c_write_cfg.reg_conf_tbl = reg_conf_tbl;
+		cci_ctrl.cfg.cci_i2c_write_cfg.size = num_byte;
+		rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+				core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+		CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		rc = cci_ctrl.status;
+	} else {
+		if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+			buf[0] = addr;
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len, buf[len]);
+			len = 1;
+		} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+			buf[0] = addr >> BITS_PER_BYTE;
+			buf[1] = addr;
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len, buf[len]);
+			S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+				len+1, buf[len+1]);
+			len = 2;
+		}
+		for (i = 0; i < num_byte; i++) {
+			buf[i+len] = data[i];
+			S_I2C_DBG("Byte %d: 0x%x\n", i+len, buf[i+len]);
+			S_I2C_DBG("Data: 0x%x\n", data[i]);
+		}
+		rc = msm_camera_i2c_txdata(client, buf, len+num_byte);
+		if (rc < 0)
+			S_I2C_DBG("%s fail\n", __func__);
 	}
-	for (i = 0; i < num_byte; i++) {
-		buf[i+len] = data[i];
-		S_I2C_DBG("Byte %d: 0x%x\n", i+len, buf[i+len]);
-		S_I2C_DBG("Data: 0x%x\n", data[i]);
-	}
-
-	rc = msm_camera_i2c_txdata(client, buf, len+num_byte);
-	if (rc < 0)
-		S_I2C_DBG("%s fail\n", __func__);
 	return rc;
 }
 
@@ -302,65 +342,80 @@
 {
 	int i;
 	int32_t rc = -EFAULT;
-	for (i = 0; i < size; i++) {
-		enum msm_camera_i2c_data_type dt;
-		if (reg_conf_tbl->cmd_type == MSM_CAMERA_I2C_CMD_POLL) {
-			rc = msm_camera_i2c_poll(client, reg_conf_tbl->reg_addr,
-				reg_conf_tbl->reg_data, reg_conf_tbl->dt);
-		} else {
-			if (reg_conf_tbl->dt == 0)
-				dt = data_type;
-			else
-				dt = reg_conf_tbl->dt;
-
-			switch (dt) {
-			case MSM_CAMERA_I2C_BYTE_DATA:
-			case MSM_CAMERA_I2C_WORD_DATA:
-				rc = msm_camera_i2c_write(
-					client,
-					reg_conf_tbl->reg_addr,
-					reg_conf_tbl->reg_data, dt);
-				break;
-			case MSM_CAMERA_I2C_SET_BYTE_MASK:
-				rc = msm_camera_i2c_set_mask(client,
+	if (client->cci_client) {
+		struct msm_camera_cci_ctrl cci_ctrl;
+		cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+		cci_ctrl.cci_info = client->cci_client;
+		cci_ctrl.cfg.cci_i2c_write_cfg.reg_conf_tbl = reg_conf_tbl;
+		cci_ctrl.cfg.cci_i2c_write_cfg.data_type = data_type;
+		cci_ctrl.cfg.cci_i2c_write_cfg.size = size;
+		rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+				core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+		CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		rc = cci_ctrl.status;
+	} else {
+		for (i = 0; i < size; i++) {
+			enum msm_camera_i2c_data_type dt;
+			if (reg_conf_tbl->cmd_type == MSM_CAMERA_I2C_CMD_POLL) {
+				rc = msm_camera_i2c_poll(client,
 					reg_conf_tbl->reg_addr,
 					reg_conf_tbl->reg_data,
-					MSM_CAMERA_I2C_BYTE_DATA, 1);
-				break;
-			case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
-				rc = msm_camera_i2c_set_mask(client,
-					reg_conf_tbl->reg_addr,
-					reg_conf_tbl->reg_data,
-					MSM_CAMERA_I2C_BYTE_DATA, 0);
-				break;
-			case MSM_CAMERA_I2C_SET_WORD_MASK:
-				rc = msm_camera_i2c_set_mask(client,
-					reg_conf_tbl->reg_addr,
-					reg_conf_tbl->reg_data,
-					MSM_CAMERA_I2C_WORD_DATA, 1);
-				break;
-			case MSM_CAMERA_I2C_UNSET_WORD_MASK:
-				rc = msm_camera_i2c_set_mask(client,
-					reg_conf_tbl->reg_addr,
-					reg_conf_tbl->reg_data,
-					MSM_CAMERA_I2C_WORD_DATA, 0);
-				break;
-			case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
-				rc = msm_camera_i2c_set_write_mask_data(client,
-					reg_conf_tbl->reg_addr,
-					reg_conf_tbl->reg_data,
-					reg_conf_tbl->mask,
-					MSM_CAMERA_I2C_BYTE_DATA);
-				break;
-			default:
-				pr_err("%s: Unsupport data type: %d\n",
-					__func__, dt);
-				break;
+					reg_conf_tbl->dt);
+			} else {
+				if (reg_conf_tbl->dt == 0)
+					dt = data_type;
+				else
+					dt = reg_conf_tbl->dt;
+				switch (dt) {
+				case MSM_CAMERA_I2C_BYTE_DATA:
+				case MSM_CAMERA_I2C_WORD_DATA:
+					rc = msm_camera_i2c_write(
+						client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data, dt);
+					break;
+				case MSM_CAMERA_I2C_SET_BYTE_MASK:
+					rc = msm_camera_i2c_set_mask(client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data,
+						MSM_CAMERA_I2C_BYTE_DATA, 1);
+					break;
+				case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+					rc = msm_camera_i2c_set_mask(client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data,
+						MSM_CAMERA_I2C_BYTE_DATA, 0);
+					break;
+				case MSM_CAMERA_I2C_SET_WORD_MASK:
+					rc = msm_camera_i2c_set_mask(client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data,
+						MSM_CAMERA_I2C_WORD_DATA, 1);
+					break;
+				case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+					rc = msm_camera_i2c_set_mask(client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data,
+						MSM_CAMERA_I2C_WORD_DATA, 0);
+					break;
+				case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
+					rc = msm_camera_i2c_set_write_mask_data(
+						client,
+						reg_conf_tbl->reg_addr,
+						reg_conf_tbl->reg_data,
+						reg_conf_tbl->mask,
+						MSM_CAMERA_I2C_BYTE_DATA);
+					break;
+				default:
+					pr_err("%s: Unsupported data type: %d\n",
+						__func__, dt);
+					break;
+				}
 			}
+			if (rc < 0)
+				break;
+			reg_conf_tbl++;
 		}
-		if (rc < 0)
-			break;
-		reg_conf_tbl++;
 	}
 	return rc;
 }
@@ -378,16 +433,30 @@
 		&& data_type != MSM_CAMERA_I2C_WORD_DATA))
 		return rc;
 
-	if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
-		buf[0] = addr;
-	} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
-		buf[0] = addr >> BITS_PER_BYTE;
-		buf[1] = addr;
-	}
-	rc = msm_camera_i2c_rxdata(client, buf, data_type);
-	if (rc < 0) {
-		S_I2C_DBG("%s fail\n", __func__);
-		return rc;
+	if (client->cci_client) {
+		struct msm_camera_cci_ctrl cci_ctrl;
+		cci_ctrl.cmd = MSM_CCI_I2C_READ;
+		cci_ctrl.cci_info = client->cci_client;
+		cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+		cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = client->addr_type;
+		cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+		cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type;
+		rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+				core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+		CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		rc = cci_ctrl.status;
+	} else {
+		if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+			buf[0] = addr;
+		} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+			buf[0] = addr >> BITS_PER_BYTE;
+			buf[1] = addr;
+		}
+		rc = msm_camera_i2c_rxdata(client, buf, data_type);
+		if (rc < 0) {
+			S_I2C_DBG("%s fail\n", __func__);
+			return rc;
+		}
 	}
 	if (data_type == MSM_CAMERA_I2C_BYTE_DATA)
 		*data = buf[0];
@@ -410,16 +479,30 @@
 		|| num_byte == 0)
 		return rc;
 
-	if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
-		buf[0] = addr;
-	} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
-		buf[0] = addr >> BITS_PER_BYTE;
-		buf[1] = addr;
-	}
-	rc = msm_camera_i2c_rxdata(client, buf, num_byte);
-	if (rc < 0) {
-		S_I2C_DBG("%s fail\n", __func__);
-		return rc;
+	if (client->cci_client) {
+		struct msm_camera_cci_ctrl cci_ctrl;
+		cci_ctrl.cmd = MSM_CCI_I2C_READ;
+		cci_ctrl.cci_info = client->cci_client;
+		cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+		cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = client->addr_type;
+		cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+		cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
+		rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+				core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+		CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+		rc = cci_ctrl.status;
+	} else {
+		if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+			buf[0] = addr;
+		} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+			buf[0] = addr >> BITS_PER_BYTE;
+			buf[1] = addr;
+		}
+		rc = msm_camera_i2c_rxdata(client, buf, num_byte);
+		if (rc < 0) {
+			S_I2C_DBG("%s fail\n", __func__);
+			return rc;
+		}
 	}
 
 	S_I2C_DBG("%s addr = 0x%x", __func__, addr);
@@ -486,3 +569,17 @@
 	return rc;
 }
 
+int32_t msm_sensor_cci_util(struct msm_camera_i2c_client *client,
+	uint16_t cci_cmd)
+{
+	int32_t rc = 0;
+	struct msm_camera_cci_ctrl cci_ctrl;
+
+	CDBG("%s line %d\n", __func__, __LINE__);
+	cci_ctrl.cmd = cci_cmd;
+	cci_ctrl.cci_info = client->cci_client;
+	rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+			core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+	CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+	return cci_ctrl.status;
+}
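
With these changes a sensor driver picks the transport simply by populating, or leaving NULL, the new cci_client pointer in its msm_camera_i2c_client; the read/write helpers branch internally between the CCI subdev and the legacy i2c_client path. An editorial usage sketch (the 0x0100 "streaming on" register is a placeholder, and MSM_CCI_INIT is assumed to be one of the cci_cmd values defined in msm_cci.h, which this patch does not show):

	static int32_t example_sensor_stream_on(
		struct msm_camera_i2c_client *client,
		struct msm_camera_cci_client *cci_client)
	{
		int32_t rc;

		client->cci_client = cci_client; /* NULL falls back to i2c_client */
		client->addr_type = MSM_CAMERA_I2C_WORD_ADDR;
		rc = msm_sensor_cci_util(client, MSM_CCI_INIT); /* assumed cmd */
		if (rc < 0)
			return rc;
		/* routed through CCI because cci_client is set */
		return msm_camera_i2c_write(client, 0x0100, 0x01,
			MSM_CAMERA_I2C_BYTE_DATA);
	}
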
diff --git a/drivers/media/video/msm/io/msm_camera_i2c.h b/drivers/media/video/msm/io/msm_camera_i2c.h
index 188a176..169a0b3 100644
--- a/drivers/media/video/msm/io/msm_camera_i2c.h
+++ b/drivers/media/video/msm/io/msm_camera_i2c.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
 #include <linux/i2c.h>
 #include <linux/delay.h>
 #include <mach/camera.h>
+#include <media/v4l2-subdev.h>
 
 #define CONFIG_MSM_CAMERA_I2C_DBG 0
 
@@ -25,39 +26,12 @@
 #define S_I2C_DBG(fmt, args...) CDBG(fmt, ##args)
 #endif
 
-enum msm_camera_i2c_reg_addr_type {
-	MSM_CAMERA_I2C_BYTE_ADDR = 1,
-	MSM_CAMERA_I2C_WORD_ADDR,
-};
-
 struct msm_camera_i2c_client {
 	struct i2c_client *client;
+	struct msm_camera_cci_client *cci_client;
 	enum msm_camera_i2c_reg_addr_type addr_type;
 };
 
-enum msm_camera_i2c_data_type {
-	MSM_CAMERA_I2C_BYTE_DATA = 1,
-	MSM_CAMERA_I2C_WORD_DATA,
-	MSM_CAMERA_I2C_SET_BYTE_MASK,
-	MSM_CAMERA_I2C_UNSET_BYTE_MASK,
-	MSM_CAMERA_I2C_SET_WORD_MASK,
-	MSM_CAMERA_I2C_UNSET_WORD_MASK,
-	MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA,
-};
-
-enum msm_camera_i2c_cmd_type {
-	MSM_CAMERA_I2C_CMD_WRITE,
-	MSM_CAMERA_I2C_CMD_POLL,
-};
-
-struct msm_camera_i2c_reg_conf {
-	uint16_t reg_addr;
-	uint16_t reg_data;
-	enum msm_camera_i2c_data_type dt;
-	enum msm_camera_i2c_cmd_type cmd_type;
-	int16_t mask;
-};
-
 struct msm_camera_i2c_reg_tbl {
 	uint16_t reg_addr;
 	uint16_t reg_data;
@@ -130,4 +104,7 @@
 
 int32_t msm_sensor_write_all_conf_array(struct msm_camera_i2c_client *client,
 	struct msm_camera_i2c_conf_array *array, uint16_t size);
+
+int32_t msm_sensor_cci_util(struct msm_camera_i2c_client *client,
+	uint16_t cci_cmd);
 #endif
diff --git a/drivers/media/video/msm/msm.h b/drivers/media/video/msm/msm.h
index d77defd..687ed74 100644
--- a/drivers/media/video/msm/msm.h
+++ b/drivers/media/video/msm/msm.h
@@ -60,6 +60,7 @@
 #define MSM_I2C_MUX_DRV_NAME "msm_cam_i2c_mux"
 #define MSM_IRQ_ROUTER_DRV_NAME "msm_cam_irq_router"
 #define MSM_CPP_DRV_NAME "msm_cpp"
+#define MSM_CCI_DRV_NAME "msm_cci"
 
 #define MAX_NUM_SENSOR_DEV 3
 #define MAX_NUM_CSIPHY_DEV 3
@@ -71,6 +72,7 @@
 #define MAX_NUM_VPE_DEV 1
 #define MAX_NUM_JPEG_DEV 3
 #define MAX_NUM_CPP_DEV 1
+#define MAX_NUM_CCI_DEV 1
 
 /* msm queue management APIs*/
 
@@ -536,6 +538,7 @@
 	struct v4l2_subdev *gesture_device;
 	struct v4l2_subdev *cpp_device[MAX_NUM_CPP_DEV];
 	struct v4l2_subdev *irqr_device;
+	struct v4l2_subdev *cci_device;
 
 	spinlock_t  intr_table_lock;
 	struct irqmgr_intr_lkup_table irq_lkup_table;
diff --git a/drivers/media/video/msm/msm_isp.c b/drivers/media/video/msm/msm_isp.c
index 67e7c02..935ce75 100644
--- a/drivers/media/video/msm/msm_isp.c
+++ b/drivers/media/video/msm/msm_isp.c
@@ -188,9 +188,9 @@
 	}
 
 	switch (vdata->type) {
-	case VFE_MSG_V32_START:
-	case VFE_MSG_V32_START_RECORDING:
-	case VFE_MSG_V2X_PREVIEW:
+	case VFE_MSG_START:
+	case VFE_MSG_START_RECORDING:
+	case VFE_MSG_PREVIEW:
 		D("%s Got V32_START_*: Getting ping addr id = %d",
 						__func__, vfe_id);
 		msm_mctl_reserve_free_buf(pmctl, NULL,
@@ -208,8 +208,7 @@
 		vfe_params.data = (void *)&free_buf;
 		rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
 		break;
-	case VFE_MSG_V32_CAPTURE:
-	case VFE_MSG_V2X_CAPTURE:
+	case VFE_MSG_CAPTURE:
 		pr_debug("%s Got V32_CAPTURE: getting buffer for id = %d",
 						__func__, vfe_id);
 		msm_mctl_reserve_free_buf(pmctl, NULL,
@@ -231,8 +230,8 @@
 		vfe_params.data = (void *)&free_buf;
 		rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
 		break;
-	case VFE_MSG_V32_JPEG_CAPTURE:
-		D("%s:VFE_MSG_V32_JPEG_CAPTURE vdata->type %d\n", __func__,
+	case VFE_MSG_JPEG_CAPTURE:
+		D("%s:VFE_MSG_JPEG_CAPTURE vdata->type %d\n", __func__,
 			vdata->type);
 		free_buf.num_planes = 2;
 		free_buf.ch_paddr[0] = pmctl->ping_imem_y;
@@ -241,7 +240,7 @@
 		cfgcmd.value = &vfe_id;
 		vfe_params.vfe_cfg = &cfgcmd;
 		vfe_params.data = (void *)&free_buf;
-		D("%s:VFE_MSG_V32_JPEG_CAPTURE y_ping=%x cbcr_ping=%x\n",
+		D("%s:VFE_MSG_JPEG_CAPTURE y_ping=%x cbcr_ping=%x\n",
 			__func__, free_buf.ch_paddr[0], free_buf.ch_paddr[1]);
 		rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
 		/* Write the same buffer into PONG */
@@ -251,7 +250,7 @@
 		cfgcmd.value = &vfe_id;
 		vfe_params.vfe_cfg = &cfgcmd;
 		vfe_params.data = (void *)&free_buf;
-		D("%s:VFE_MSG_V32_JPEG_CAPTURE y_pong=%x cbcr_pong=%x\n",
+		D("%s:VFE_MSG_JPEG_CAPTURE y_pong=%x cbcr_pong=%x\n",
 			__func__, free_buf.ch_paddr[0], free_buf.ch_paddr[1]);
 		rc = v4l2_subdev_call(sd, core, ioctl, 0, &vfe_params);
 		break;
@@ -424,10 +423,12 @@
 		stats.buf_idx = isp_stats->buf_idx;
 		switch (isp_stats->id) {
 		case MSG_ID_STATS_AEC:
+		case MSG_ID_STATS_BG:
 			stats.aec.buff = stats.buffer;
 			stats.aec.fd = stats.fd;
 			break;
 		case MSG_ID_STATS_AF:
+		case MSG_ID_STATS_BF:
 			stats.af.buff = stats.buffer;
 			stats.af.fd = stats.fd;
 			break;
@@ -447,6 +448,10 @@
 			stats.cs.buff = stats.buffer;
 			stats.cs.fd = stats.fd;
 			break;
+		case MSG_ID_STATS_BHIST:
+			stats.skin.buff = stats.buffer;
+			stats.skin.fd = stats.fd;
+			break;
 		case MSG_ID_STATS_AWB_AEC:
 			break;
 		default:
@@ -537,6 +542,9 @@
 	memset(&axi_data, 0, sizeof(axi_data));
 	CDBG("%s: cmd_type %d\n", __func__, cfgcmd.cmd_type);
 	switch (cfgcmd.cmd_type) {
+	case CMD_STATS_BG_ENABLE:
+	case CMD_STATS_BF_ENABLE:
+	case CMD_STATS_BHIST_ENABLE:
 	case CMD_STATS_AF_ENABLE:
 	case CMD_STATS_AEC_ENABLE:
 	case CMD_STATS_AWB_ENABLE:
@@ -629,6 +637,12 @@
 			cfgcmd.cmd_type = CMD_STATS_CS_BUF_RELEASE;
 		else if (buf.type == STAT_AEAW)
 			cfgcmd.cmd_type = CMD_STATS_BUF_RELEASE;
+		else if (buf.type == STAT_BG)
+			cfgcmd.cmd_type = CMD_STATS_BG_BUF_RELEASE;
+		else if (buf.type == STAT_BF)
+			cfgcmd.cmd_type = CMD_STATS_BF_BUF_RELEASE;
+		else if (buf.type == STAT_BHIST)
+			cfgcmd.cmd_type = CMD_STATS_BHIST_BUF_RELEASE;
 
 		else {
 			pr_err("%s: invalid buf type %d\n",
@@ -673,7 +687,6 @@
 	}
 	case MSM_CAM_IOCTL_STATS_ENQUEUEBUF: {
 		struct msm_stats_buf_info buf_info;
-
 		if (copy_from_user(&buf_info, arg,
 			sizeof(struct msm_stats_buf_info))) {
 			ERR_COPY_FROM_USER();
@@ -687,18 +700,30 @@
 	}
 	case MSM_CAM_IOCTL_STATS_FLUSH_BUFQ: {
 		struct msm_stats_flush_bufq bufq_info;
-
 		if (copy_from_user(&bufq_info, arg,
 			sizeof(struct msm_stats_flush_bufq))) {
 			ERR_COPY_FROM_USER();
 			return -EFAULT;
-	}
+		}
 	cfgcmd.cmd_type = VFE_CMD_STATS_FLUSH_BUFQ;
 	cfgcmd.value = (void *)&bufq_info;
 	cfgcmd.length = sizeof(struct msm_stats_flush_bufq);
 	rc = msm_isp_subdev_ioctl(sd, &cfgcmd, NULL);
 	break;
 	}
+	case MSM_CAM_IOCTL_STATS_UNREG_BUF: {
+		struct msm_stats_reqbuf reqbuf;
+		if (copy_from_user(&reqbuf, arg,
+			sizeof(struct msm_stats_reqbuf))) {
+			ERR_COPY_FROM_USER();
+			return -EFAULT;
+		}
+	cfgcmd.cmd_type = VFE_CMD_STATS_UNREGBUF;
+	cfgcmd.value = (void *)&reqbuf;
+	cfgcmd.length = sizeof(struct msm_stats_reqbuf);
+	rc = msm_isp_subdev_ioctl(sd, &cfgcmd, (void *)mctl->client);
+	break;
+	}
 	default:
 		rc = -1;
 	break;
@@ -734,6 +759,7 @@
 	case MSM_CAM_IOCTL_STATS_REQBUF:
 	case MSM_CAM_IOCTL_STATS_ENQUEUEBUF:
 	case MSM_CAM_IOCTL_STATS_FLUSH_BUFQ:
+	case MSM_CAM_IOCTL_STATS_UNREG_BUF:
 		rc = msm_vfe_stats_buf_ioctl(sd, cmd, pmctl, argp);
 		break;
 
diff --git a/drivers/media/video/msm/msm_mctl_buf.c b/drivers/media/video/msm/msm_mctl_buf.c
index befd213..3cd6a25 100644
--- a/drivers/media/video/msm/msm_mctl_buf.c
+++ b/drivers/media/video/msm/msm_mctl_buf.c
@@ -721,11 +721,8 @@
 	uint32_t buf_phyaddr = 0;
 	int rc = -EINVAL;
 
-	if (!free_buf)
-		return rc;
-
-	if (!pcam_inst) {
-		pr_err("%s Invalid instance, buffer not released\n",
+	if (!pcam_inst || !free_buf) {
+		pr_err("%s Invalid argument, buffer will not be returned\n",
 			__func__);
 		return rc;
 	}
@@ -735,17 +732,19 @@
 		buf_phyaddr =
 			(uint32_t) videobuf2_to_pmem_contig(&buf->vidbuf, 0);
 		if (free_buf->ch_paddr[0] == buf_phyaddr) {
-			D("%s buf = 0x%x \n", __func__, free_buf->ch_paddr[0]);
-			buf->state = MSM_BUFFER_STATE_UNUSED;
+			D("%s Return buffer %d and mark it as QUEUED\n",
+				__func__, buf->vidbuf.v4l2_buf.index);
+			buf->state = MSM_BUFFER_STATE_QUEUED;
 			rc = 0;
 			break;
 		}
 	}
-
-	if (rc != 0)
-		pr_err("%s invalid buffer address ", __func__);
-
 	spin_unlock_irqrestore(&pcam_inst->vq_irqlock, flags);
+
+	if (rc)
+		pr_err("%s Cannot find buffer %x", __func__,
+			free_buf->ch_paddr[0]);
+
 	return rc;
 }
 
diff --git a/drivers/media/video/msm/msm_mem.c b/drivers/media/video/msm/msm_mem.c
index 5d412db..e2e9d1b 100644
--- a/drivers/media/video/msm/msm_mem.c
+++ b/drivers/media/video/msm/msm_mem.c
@@ -208,6 +208,9 @@
 	case MSM_PMEM_IHIST:
 	case MSM_PMEM_SKIN:
 	case MSM_PMEM_AEC_AWB:
+	case MSM_PMEM_BAYER_GRID:
+	case MSM_PMEM_BAYER_FOCUS:
+	case MSM_PMEM_BAYER_HIST:
 		rc = msm_pmem_table_add(ptype, pinfo, client);
 		break;
 
@@ -235,6 +238,9 @@
 	case MSM_PMEM_IHIST:
 	case MSM_PMEM_SKIN:
 	case MSM_PMEM_AEC_AWB:
+	case MSM_PMEM_BAYER_GRID:
+	case MSM_PMEM_BAYER_FOCUS:
+	case MSM_PMEM_BAYER_HIST:
 		hlist_for_each_entry_safe(region, node, n,
 				ptype, list) {
 
diff --git a/drivers/media/video/msm/sensors/msm_sensor.c b/drivers/media/video/msm/sensors/msm_sensor.c
index 72f3f3d..f687573 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.c
+++ b/drivers/media/video/msm/sensors/msm_sensor.c
@@ -605,6 +605,781 @@
 	return 0;
 }
 
+static int32_t msm_sensor_init_flash_data(struct device_node *of_node,
+	struct  msm_camera_sensor_info *sensordata)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+
+	sensordata->flash_data = kzalloc(sizeof(
+		struct msm_camera_sensor_flash_data), GFP_KERNEL);
+	if (!sensordata->flash_data) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32(of_node, "flash_type", &val);
+	CDBG("%s flash_type %d, rc %d\n", __func__, val, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR;
+	}
+	sensordata->flash_data->flash_type = val;
+	return rc;
+ERROR:
+	kfree(sensordata->flash_data);
+	return rc;
+}
+
+static int32_t msm_sensor_init_vreg_data(struct device_node *of_node,
+	struct msm_camera_sensor_platform_info *pinfo)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+
+	count = of_property_count_strings(of_node, "cam_vreg_name");
+	CDBG("%s cam_vreg_name count %d\n", __func__, count);
+
+	if (!count)
+		return 0;
+
+	pinfo->cam_vreg = kzalloc(sizeof(struct camera_vreg_t) * count,
+		GFP_KERNEL);
+	if (!pinfo->cam_vreg) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	pinfo->num_vreg = count;
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "cam_vreg_name", i,
+			&pinfo->cam_vreg[i].reg_name);
+		CDBG("%s reg_name[%d] = %s\n", __func__, i,
+			pinfo->cam_vreg[i].reg_name);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto ERROR1;
+		}
+	}
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	rc = of_property_read_u32_array(of_node, "cam_vreg_type", val_array,
+		count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		pinfo->cam_vreg[i].type = val_array[i];
+		CDBG("%s cam_vreg[%d].type = %d\n", __func__, i,
+			pinfo->cam_vreg[i].type);
+	}
+
+	rc = of_property_read_u32_array(of_node, "cam_vreg_min_voltage",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		pinfo->cam_vreg[i].min_voltage = val_array[i];
+		CDBG("%s cam_vreg[%d].min_voltage = %d\n", __func__,
+			i, pinfo->cam_vreg[i].min_voltage);
+	}
+
+	rc = of_property_read_u32_array(of_node, "cam_vreg_max_voltage",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		pinfo->cam_vreg[i].max_voltage = val_array[i];
+		CDBG("%s cam_vreg[%d].max_voltage = %d\n", __func__,
+			i, pinfo->cam_vreg[i].max_voltage);
+	}
+
+	rc = of_property_read_u32_array(of_node, "cam_vreg_op_mode", val_array,
+		count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		pinfo->cam_vreg[i].op_mode = val_array[i];
+		CDBG("%s cam_vreg[%d].op_mode = %d\n", __func__, i,
+			pinfo->cam_vreg[i].op_mode);
+	}
+
+	kfree(val_array);
+	return rc;
+ERROR2:
+	kfree(val_array);
+ERROR1:
+	kfree(pinfo->cam_vreg);
+	pinfo->num_vreg = 0;
+	return rc;
+}
+
+static int32_t msm_sensor_init_gpio_common_tbl_data(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+
+	if (!of_get_property(of_node, "gpio_common_tbl_num", &count))
+		return 0;
+
+	count /= sizeof(uint32_t);
+
+	if (!count)
+		return 0;
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	gconf->cam_gpio_common_tbl = kzalloc(sizeof(struct gpio) * count,
+		GFP_KERNEL);
+	if (!gconf->cam_gpio_common_tbl) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+	gconf->cam_gpio_common_tbl_size = count;
+
+	rc = of_property_read_u32_array(of_node, "gpio_common_tbl_num",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_common_tbl[i].gpio = val_array[i];
+		CDBG("%s cam_gpio_common_tbl[%d].gpio = %d\n", __func__, i,
+			gconf->cam_gpio_common_tbl[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_common_tbl_flags",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_common_tbl[i].flags = val_array[i];
+		CDBG("%s cam_gpio_common_tbl[%d].flags = %ld\n", __func__, i,
+			gconf->cam_gpio_common_tbl[i].flags);
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"gpio_common_tbl_label", i,
+			&gconf->cam_gpio_common_tbl[i].label);
+		CDBG("%s cam_gpio_common_tbl[%d].label = %s\n", __func__, i,
+			gconf->cam_gpio_common_tbl[i].label);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto ERROR2;
+		}
+	}
+
+	kfree(val_array);
+	return rc;
+
+ERROR2:
+	kfree(gconf->cam_gpio_common_tbl);
+ERROR1:
+	kfree(val_array);
+	gconf->cam_gpio_common_tbl_size = 0;
+	return rc;
+}
+
+static int32_t msm_sensor_init_gpio_req_tbl_data(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+
+	if (!of_get_property(of_node, "gpio_req_tbl_num", &count))
+		return 0;
+
+	count /= sizeof(uint32_t);
+
+	if (!count)
+		return 0;
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	gconf->cam_gpio_req_tbl = kzalloc(sizeof(struct gpio) * count,
+		GFP_KERNEL);
+	if (!gconf->cam_gpio_req_tbl) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+	gconf->cam_gpio_req_tbl_size = count;
+
+	rc = of_property_read_u32_array(of_node, "gpio_req_tbl_num",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_req_tbl[i].gpio = val_array[i];
+		CDBG("%s cam_gpio_req_tbl[%d].gpio = %d\n", __func__, i,
+			gconf->cam_gpio_req_tbl[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_req_tbl_flags",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_req_tbl[i].flags = val_array[i];
+		CDBG("%s cam_gpio_req_tbl[%d].flags = %ld\n", __func__, i,
+			gconf->cam_gpio_req_tbl[i].flags);
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"gpio_req_tbl_label", i,
+			&gconf->cam_gpio_req_tbl[i].label);
+		CDBG("%s cam_gpio_req_tbl[%d].label = %s\n", __func__, i,
+			gconf->cam_gpio_req_tbl[i].label);
+		if (rc < 0) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			goto ERROR2;
+		}
+	}
+
+	kfree(val_array);
+	return rc;
+
+ERROR2:
+	kfree(gconf->cam_gpio_req_tbl);
+ERROR1:
+	kfree(val_array);
+	gconf->cam_gpio_req_tbl_size = 0;
+	return rc;
+}
+
+static int32_t msm_sensor_init_gpio_set_tbl_data(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+
+	if (!of_get_property(of_node, "gpio_set_tbl_num", &count))
+		return 0;
+
+	count /= sizeof(uint32_t);
+
+	if (!count)
+		return 0;
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	gconf->cam_gpio_set_tbl = kzalloc(sizeof(struct msm_gpio_set_tbl) *
+		count, GFP_KERNEL);
+	if (!gconf->cam_gpio_set_tbl) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+	gconf->cam_gpio_set_tbl_size = count;
+
+	rc = of_property_read_u32_array(of_node, "gpio_set_tbl_num",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_set_tbl[i].gpio = val_array[i];
+		CDBG("%s cam_gpio_set_tbl[%d].gpio = %d\n", __func__, i,
+			gconf->cam_gpio_set_tbl[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_set_tbl_flags",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_set_tbl[i].flags = val_array[i];
+		CDBG("%s cam_gpio_set_tbl[%d].flags = %ld\n", __func__, i,
+			gconf->cam_gpio_set_tbl[i].flags);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_set_tbl_delay",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_set_tbl[i].delay = val_array[i];
+		CDBG("%s cam_gpio_set_tbl[%d].delay = %d\n", __func__, i,
+			gconf->cam_gpio_set_tbl[i].delay);
+	}
+
+	kfree(val_array);
+	return rc;
+
+ERROR2:
+	kfree(gconf->cam_gpio_set_tbl);
+ERROR1:
+	kfree(val_array);
+	gconf->cam_gpio_set_tbl_size = 0;
+	return rc;
+}
+
+static int32_t msm_sensor_init_gpio_tlmm_tbl_data(struct device_node *of_node,
+	struct msm_camera_gpio_conf *gconf)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+	struct gpio_tlmm_cfg *tlmm_cfg = NULL;
+
+	if (!of_get_property(of_node, "gpio_tlmm_table_num", &count))
+		return 0;
+
+	count /= sizeof(uint32_t);
+
+	if (!count)
+		return 0;
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	tlmm_cfg = kzalloc(sizeof(struct gpio_tlmm_cfg) * count, GFP_KERNEL);
+	if (!tlmm_cfg) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	gconf->camera_off_table = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!gconf->camera_off_table) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR2;
+	}
+	gconf->camera_off_table_size = count;
+
+	gconf->camera_on_table = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!gconf->camera_on_table) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR3;
+	}
+	gconf->camera_on_table_size = count;
+
+	rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_num",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR4;
+	}
+	for (i = 0; i < count; i++) {
+		tlmm_cfg[i].gpio = val_array[i];
+		CDBG("%s tlmm_cfg[%d].gpio = %d\n", __func__, i,
+			tlmm_cfg[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_dir",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR4;
+	}
+	for (i = 0; i < count; i++) {
+		tlmm_cfg[i].dir = val_array[i];
+		CDBG("%s tlmm_cfg[%d].dir = %d\n", __func__, i,
+			tlmm_cfg[i].dir);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_pull",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR4;
+	}
+	for (i = 0; i < count; i++) {
+		tlmm_cfg[i].pull = val_array[i];
+		CDBG("%s tlmm_cfg[%d].pull = %d\n", __func__, i,
+			tlmm_cfg[i].pull);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio_tlmm_table_drvstr",
+		val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR4;
+	}
+	for (i = 0; i < count; i++) {
+		tlmm_cfg[i].drvstr = val_array[i];
+		CDBG("%s tlmm_cfg[%d].drvstr = %d\n", __func__, i,
+			tlmm_cfg[i].drvstr);
+	}
+
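+	/* Build the TLMM configs: function select 0 for the camera-off table, 1 for the camera-on table */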
+	for (i = 0; i < count; i++) {
+		gconf->camera_off_table[i] = GPIO_CFG(tlmm_cfg[i].gpio,
+			0, tlmm_cfg[i].dir, tlmm_cfg[i].pull,
+			tlmm_cfg[i].drvstr);
+		gconf->camera_on_table[i] = GPIO_CFG(tlmm_cfg[i].gpio,
+			1, tlmm_cfg[i].dir, tlmm_cfg[i].pull,
+			tlmm_cfg[i].drvstr);
+	}
+
+	kfree(tlmm_cfg);
+	kfree(val_array);
+	return rc;
+
+ERROR4:
+	kfree(gconf->camera_on_table);
+ERROR3:
+	kfree(gconf->camera_off_table);
+ERROR2:
+	kfree(tlmm_cfg);
+ERROR1:
+	kfree(val_array);
+	gconf->camera_off_table_size = 0;
+	gconf->camera_on_table_size = 0;
+	return rc;
+}
+
+static int32_t msm_sensor_init_csi_data(struct device_node *of_node,
+	struct msm_camera_sensor_info *sensordata)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0, val = 0;
+	uint32_t *val_array = NULL;
+	struct msm_camera_sensor_platform_info *pinfo =
+		sensordata->sensor_platform_info;
+
+	rc = of_property_read_u32(of_node, "csi_if", &count);
+	CDBG("%s csi_if %d, rc %d\n", __func__, count, rc);
+	if (rc < 0 || !count)
+		return rc;
+	sensordata->csi_if = count;
+
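+	/* One platform-data entry is allocated per CSI interface reported by csi_if */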
+	sensordata->pdata = kzalloc(sizeof(
+		struct msm_camera_device_platform_data) * count, GFP_KERNEL);
+	if (!sensordata->pdata) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+	if (!val_array) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	rc = of_property_read_u32_array(of_node, "csid_core", val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		sensordata->pdata[i].csid_core = val_array[i];
+		CDBG("%s csid_core[%d].csid_core = %d\n", __func__, i,
+			sensordata->pdata[i].csid_core);
+	}
+
+	rc = of_property_read_u32_array(of_node, "is_vpe", val_array, count);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		sensordata->pdata[i].is_vpe = val_array[i];
+		CDBG("%s csid_core[%d].is_vpe = %d\n", __func__, i,
+			sensordata->pdata[i].is_vpe);
+	}
+
+	pinfo->csi_lane_params = kzalloc(
+		sizeof(struct msm_camera_csi_lane_params), GFP_KERNEL);
+	if (!pinfo->csi_lane_params) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR2;
+	}
+
+	rc = of_property_read_u32(of_node, "csi_lane_assign", &val);
+	CDBG("%s csi_lane_assign %x, rc %d\n", __func__, val, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR3;
+	}
+	pinfo->csi_lane_params->csi_lane_assign = val;
+
+	rc = of_property_read_u32(of_node, "csi_lane_mask", &val);
+	CDBG("%s csi_lane_mask %x, rc %d\n", __func__, val, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR3;
+	}
+	pinfo->csi_lane_params->csi_lane_mask = val;
+
+	kfree(val_array);
+	return rc;
+ERROR3:
+	kfree(pinfo->csi_lane_params);
+ERROR2:
+	kfree(val_array);
+ERROR1:
+	kfree(sensordata->pdata);
+	sensordata->csi_if = 0;
+	return rc;
+}
+static int32_t msm_sensor_init_actuator_data(struct device_node *of_node,
+	struct msm_camera_sensor_info *sensordata)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+
+	rc = of_property_read_u32(of_node, "actuator_cam_name", &val);
+	CDBG("%s actuator_cam_name %d, rc %d\n", __func__, val, rc);
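+	/* The actuator node is optional; its absence is not treated as an error */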
+	if (rc < 0)
+		return 0;
+
+	sensordata->actuator_info = kzalloc(sizeof(struct msm_actuator_info),
+		GFP_KERNEL);
+	if (!sensordata->actuator_info) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR;
+	}
+
+	sensordata->actuator_info->cam_name = val;
+
+	rc = of_property_read_u32(of_node, "actuator_vcm_pwd", &val);
+	CDBG("%s actuator_vcm_pwd %d, rc %d\n", __func__, val, rc);
+	if (!rc)
+		sensordata->actuator_info->vcm_pwd = val;
+
+	rc = of_property_read_u32(of_node, "actuator_vcm_enable", &val);
+	CDBG("%s actuator_vcm_enable %d, rc %d\n", __func__, val, rc);
+	if (!rc)
+		sensordata->actuator_info->vcm_enable = val;
+
+	return 0;
+ERROR:
+	return rc;
+}
+
+static int32_t msm_sensor_init_sensor_data(struct platform_device *pdev,
+	struct msm_sensor_ctrl_t *s_ctrl)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+	struct device_node *of_node = pdev->dev.of_node;
+	struct msm_camera_sensor_platform_info *pinfo = NULL;
+	struct msm_camera_gpio_conf *gconf = NULL;
+	struct msm_camera_sensor_info *sensordata = NULL;
+
+	s_ctrl->sensordata = kzalloc(sizeof(struct msm_camera_sensor_info),
+		GFP_KERNEL);
+	if (!s_ctrl->sensordata) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	sensordata = s_ctrl->sensordata;
+	rc = of_property_read_string(of_node, "sensor_name",
+		&sensordata->sensor_name);
+	CDBG("%s sensor_name %s, rc %d\n", __func__,
+		sensordata->sensor_name, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR1;
+	}
+
+	rc = of_property_read_u32(of_node, "camera_type", &val);
+	CDBG("%s camera_type %d, rc %d\n", __func__, val, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR1;
+	}
+	sensordata->camera_type = val;
+
+	rc = of_property_read_u32(of_node, "sensor_type", &val);
+	CDBG("%s sensor_type %d, rc %d\n", __func__, val, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR1;
+	}
+	sensordata->sensor_type = val;
+
+	rc = msm_sensor_init_flash_data(of_node, sensordata);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR1;
+	}
+
+	sensordata->sensor_platform_info = kzalloc(sizeof(
+		struct msm_camera_sensor_platform_info), GFP_KERNEL);
+	if (!sensordata->sensor_platform_info) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	pinfo = sensordata->sensor_platform_info;
+
+	rc = of_property_read_u32(of_node, "mount_angle", &pinfo->mount_angle);
+	CDBG("%s mount_angle %d, rc %d\n", __func__, pinfo->mount_angle, rc);
+	if (rc < 0) {
+		/* Set default mount angle */
+		pinfo->mount_angle = 0;
+		rc = 0;
+	}
+
+	rc = msm_sensor_init_csi_data(of_node, sensordata);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR2;
+	}
+
+	rc = msm_sensor_init_vreg_data(of_node, pinfo);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR3;
+	}
+
+	pinfo->gpio_conf = kzalloc(sizeof(struct msm_camera_gpio_conf),
+		GFP_KERNEL);
+	if (!pinfo->gpio_conf) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto ERROR4;
+	}
+	gconf = pinfo->gpio_conf;
+	rc = of_property_read_u32(of_node, "gpio_no_mux", &gconf->gpio_no_mux);
+	CDBG("%s gconf->gpio_no_mux %d, rc %d\n", __func__,
+		gconf->gpio_no_mux, rc);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR5;
+	}
+
+	rc = msm_sensor_init_gpio_common_tbl_data(of_node, gconf);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR5;
+	}
+
+	rc = msm_sensor_init_gpio_req_tbl_data(of_node, gconf);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR6;
+	}
+
+	rc = msm_sensor_init_gpio_set_tbl_data(of_node, gconf);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR7;
+	}
+
+	rc = msm_sensor_init_gpio_tlmm_tbl_data(of_node, gconf);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR8;
+	}
+
+	rc = msm_sensor_init_actuator_data(of_node, sensordata);
+	if (rc < 0) {
+		pr_err("%s failed %d\n", __func__, __LINE__);
+		goto ERROR9;
+	}
+
+	return rc;
+
+ERROR9:
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		camera_on_table);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		camera_off_table);
+ERROR8:
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_set_tbl);
+ERROR7:
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_req_tbl);
+ERROR6:
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_common_tbl);
+ERROR5:
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf);
+ERROR4:
+	kfree(s_ctrl->sensordata->sensor_platform_info->cam_vreg);
+ERROR3:
+	kfree(s_ctrl->sensordata->sensor_platform_info->csi_lane_params);
+	kfree(s_ctrl->sensordata->pdata);
+ERROR2:
+	kfree(s_ctrl->sensordata->sensor_platform_info);
+	kfree(s_ctrl->sensordata->flash_data);
+ERROR1:
+	kfree(s_ctrl->sensordata);
+	return rc;
+}
+
+int32_t msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl)
+{
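+	/* Sensor data is allocated from DT only in the platform-probe path; I2C sensors keep board data */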
+	if (!s_ctrl->pdev)
+		return 0;
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		camera_on_table);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		camera_off_table);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_set_tbl);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_req_tbl);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf->
+		cam_gpio_common_tbl);
+	kfree(s_ctrl->sensordata->sensor_platform_info->gpio_conf);
+	kfree(s_ctrl->sensordata->sensor_platform_info->cam_vreg);
+	kfree(s_ctrl->sensordata->sensor_platform_info->csi_lane_params);
+	kfree(s_ctrl->sensordata->pdata);
+	kfree(s_ctrl->sensordata->sensor_platform_info);
+	kfree(s_ctrl->sensordata->flash_data);
+	kfree(s_ctrl->sensordata);
+	return 0;
+}
+
 int32_t msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl)
 {
 	int32_t rc = 0;
@@ -666,8 +1441,21 @@
 		data->sensor_platform_info->i2c_conf->use_i2c_mux)
 		msm_sensor_enable_i2c_mux(data->sensor_platform_info->i2c_conf);
 
+	if (s_ctrl->sensor_i2c_client->cci_client) {
+		rc = msm_sensor_cci_util(s_ctrl->sensor_i2c_client,
+			MSM_CCI_INIT);
+		if (rc < 0) {
+			pr_err("%s cci_init failed\n", __func__);
+			goto cci_init_failed;
+		}
+	}
 	return rc;
 
+cci_init_failed:
+	if (data->sensor_platform_info->i2c_conf &&
+		data->sensor_platform_info->i2c_conf->use_i2c_mux)
+		msm_sensor_disable_i2c_mux(
+			data->sensor_platform_info->i2c_conf);
 enable_clk_failed:
 		msm_camera_config_gpio_table(data, 0);
 config_gpio_failed:
@@ -692,6 +1480,11 @@
 {
 	struct msm_camera_sensor_info *data = s_ctrl->sensordata;
 	CDBG("%s\n", __func__);
+	if (s_ctrl->sensor_i2c_client->cci_client) {
+		msm_sensor_cci_util(s_ctrl->sensor_i2c_client,
+			MSM_CCI_RELEASE);
+	}
+
 	if (data->sensor_platform_info->i2c_conf &&
 		data->sensor_platform_info->i2c_conf->use_i2c_mux)
 		msm_sensor_disable_i2c_mux(
@@ -729,7 +1522,8 @@
 		return rc;
 	}
 
-	CDBG("msm_sensor id: %d\n", chipid);
+	CDBG("%s msm_sensor id: %x, exp id: %x\n", __func__, chipid,
+		s_ctrl->sensor_id_info->sensor_id);
 	if (chipid != s_ctrl->sensor_id_info->sensor_id) {
 		pr_err("msm_sensor_match_id chip id does not match\n");
 		return -ENODEV;
@@ -810,6 +1604,90 @@
 	return rc;
 }
 
+static int msm_sensor_subdev_match_core(struct device *dev, void *data)
+{
+	int core_index = (int)data;
+	struct platform_device *pdev = to_platform_device(dev);
+	CDBG("%s cci pdev %p\n", __func__, pdev);
+	if (pdev->id == core_index)
+		return 1;
+	else
+		return 0;
+}
+
+int32_t msm_sensor_platform_probe(struct platform_device *pdev, void *data)
+{
+	int32_t rc = 0;
+	struct msm_sensor_ctrl_t *s_ctrl = (struct msm_sensor_ctrl_t *)data;
+	struct device_driver *driver;
+	struct device *dev;
+	s_ctrl->pdev = pdev;
+	CDBG("%s called data %p\n", __func__, data);
+	if (pdev->dev.of_node) {
+		rc = msm_sensor_init_sensor_data(pdev, s_ctrl);
+		if (rc < 0) {
+			pr_err("%s failed line %d\n", __func__, __LINE__);
+			return rc;
+		}
+	}
+	s_ctrl->sensor_i2c_client->cci_client = kzalloc(sizeof(
+		struct msm_camera_cci_client), GFP_KERNEL);
+	if (!s_ctrl->sensor_i2c_client->cci_client) {
+		pr_err("%s failed line %d\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+	driver = driver_find(MSM_CCI_DRV_NAME, &platform_bus_type);
+	if (!driver) {
+		pr_err("%s failed line %d\n", __func__, __LINE__);
+		return -ENODEV;
+	}
+
+	dev = driver_find_device(driver, NULL, 0,
+				msm_sensor_subdev_match_core);
+	if (!dev) {
+		pr_err("%s failed line %d\n", __func__, __LINE__);
+		return -ENODEV;
+	}
+	s_ctrl->sensor_i2c_client->cci_client->cci_subdev =
+		dev_get_drvdata(dev);
+	CDBG("%s sd %p\n", __func__,
+		s_ctrl->sensor_i2c_client->cci_client->cci_subdev);
+	s_ctrl->sensor_i2c_client->cci_client->cci_i2c_master = MASTER_0;
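+	/* sensor_i2c_addr is an 8-bit address; CCI takes the 7-bit slave id, hence the shift */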
+	s_ctrl->sensor_i2c_client->cci_client->sid =
+		s_ctrl->sensor_i2c_addr >> 1;
+	s_ctrl->sensor_i2c_client->cci_client->retries = 0;
+	s_ctrl->sensor_i2c_client->cci_client->id_map = 0;
+
+	rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
+	if (rc < 0) {
+		pr_err("%s %s power up failed\n", __func__,
+			pdev->id_entry->name);
+		return rc;
+	}
+
+	if (s_ctrl->func_tbl->sensor_match_id)
+		rc = s_ctrl->func_tbl->sensor_match_id(s_ctrl);
+	else
+		rc = msm_sensor_match_id(s_ctrl);
+	if (rc < 0)
+		goto probe_fail;
+
+	v4l2_subdev_init(&s_ctrl->sensor_v4l2_subdev,
+		s_ctrl->sensor_v4l2_subdev_ops);
+	snprintf(s_ctrl->sensor_v4l2_subdev.name,
+		sizeof(s_ctrl->sensor_v4l2_subdev.name), "%s",
+		s_ctrl->sensordata->sensor_name);
+	v4l2_set_subdevdata(&s_ctrl->sensor_v4l2_subdev, pdev);
+	msm_sensor_register(&s_ctrl->sensor_v4l2_subdev);
+
+	goto power_down;
+probe_fail:
+	pr_err("%s %s probe failed\n", __func__, pdev->id_entry->name);
+power_down:
+	s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+	return rc;
+}
+
 int32_t msm_sensor_power(struct v4l2_subdev *sd, int on)
 {
 	int rc = 0;
diff --git a/drivers/media/video/msm/sensors/msm_sensor.h b/drivers/media/video/msm/sensors/msm_sensor.h
index a3ddaa7..dc394e1 100644
--- a/drivers/media/video/msm/sensors/msm_sensor.h
+++ b/drivers/media/video/msm/sensors/msm_sensor.h
@@ -22,6 +22,9 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gpio.h>
 #include <mach/camera.h>
 #include <mach/gpio.h>
 #include <media/msm_camera.h>
@@ -35,6 +38,13 @@
 #define MSM_SENSOR_MCLK_16HZ 16000000
 #define MSM_SENSOR_MCLK_24HZ 24000000
 
+struct gpio_tlmm_cfg {
+	uint32_t gpio;
+	uint32_t dir;
+	uint32_t pull;
+	uint32_t drvstr;
+};
+
 enum msm_sensor_reg_update {
 	/* Sensor registers that need to be updated during initialization */
 	MSM_SENSOR_REG_INIT,
@@ -151,6 +161,7 @@
 	struct i2c_client *msm_sensor_client;
 	struct i2c_driver *sensor_i2c_driver;
 	struct msm_camera_i2c_client *sensor_i2c_client;
+	struct platform_device *pdev;
 	uint16_t sensor_i2c_addr;
 
 	struct msm_sensor_output_reg_addr_t *sensor_output_reg_addr;
@@ -212,6 +223,9 @@
 int32_t msm_sensor_match_id(struct msm_sensor_ctrl_t *s_ctrl);
 int msm_sensor_i2c_probe(struct i2c_client *client,
 	const struct i2c_device_id *id);
+
+int32_t msm_sensor_platform_probe(struct platform_device *pdev, void *data);
+
 int32_t msm_sensor_power(struct v4l2_subdev *sd, int on);
 
 int32_t msm_sensor_v4l2_s_ctrl(struct v4l2_subdev *sd,
@@ -256,6 +270,8 @@
 int32_t msm_sensor_get_csi_params(struct msm_sensor_ctrl_t *s_ctrl,
 		struct csi_lane_params_t *sensor_output_info);
 
+int32_t msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl);
+
 struct msm_sensor_ctrl_t *get_sctrl(struct v4l2_subdev *sd);
 
 #define VIDIOC_MSM_SENSOR_CFG \
diff --git a/drivers/media/video/msm/sensors/ov2720.c b/drivers/media/video/msm/sensors/ov2720.c
index e4c5061..e08cd0a 100644
--- a/drivers/media/video/msm/sensors/ov2720.c
+++ b/drivers/media/video/msm/sensors/ov2720.c
@@ -749,11 +749,51 @@
 	.addr_type = MSM_CAMERA_I2C_WORD_ADDR,
 };
 
+
+static const struct of_device_id ov2720_dt_match[] = {
+	{.compatible = "qcom,ov2720", .data = &ov2720_s_ctrl},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, ov2720_dt_match);
+
+static struct platform_driver ov2720_platform_driver = {
+	.driver = {
+		.name = "qcom,ov2720",
+		.owner = THIS_MODULE,
+		.of_match_table = ov2720_dt_match,
+	},
+};
+
+static int32_t ov2720_platform_probe(struct platform_device *pdev)
+{
+	int32_t rc = 0;
+	const struct of_device_id *match;
+	match = of_match_device(ov2720_dt_match, &pdev->dev);
+	rc = msm_sensor_platform_probe(pdev, match->data);
+	return rc;
+}
+
 static int __init msm_sensor_init_module(void)
 {
+	int32_t rc = 0;
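+	/* Try the DT-based platform driver first; fall back to the legacy I2C driver */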
+	rc = platform_driver_probe(&ov2720_platform_driver,
+		ov2720_platform_probe);
+	if (!rc)
+		return rc;
 	return i2c_add_driver(&ov2720_i2c_driver);
 }
 
+static void __exit msm_sensor_exit_module(void)
+{
+	if (ov2720_s_ctrl.pdev) {
+		msm_sensor_free_sensor_data(&ov2720_s_ctrl);
+		platform_driver_unregister(&ov2720_platform_driver);
+	} else {
+		i2c_del_driver(&ov2720_i2c_driver);
+	}
+}
+
 static struct v4l2_subdev_core_ops ov2720_subdev_core_ops = {
 	.ioctl = msm_sensor_subdev_ioctl,
 	.s_power = msm_sensor_power,
@@ -824,6 +864,7 @@
 };
 
 module_init(msm_sensor_init_module);
+module_exit(msm_sensor_exit_module);
 MODULE_DESCRIPTION("Omnivision 2MP Bayer sensor driver");
 MODULE_LICENSE("GPL v2");
 
diff --git a/drivers/media/video/msm/sensors/s5k3l1yx.c b/drivers/media/video/msm/sensors/s5k3l1yx.c
index f480a1c..64b004e 100644
--- a/drivers/media/video/msm/sensors/s5k3l1yx.c
+++ b/drivers/media/video/msm/sensors/s5k3l1yx.c
@@ -618,11 +618,50 @@
 	.addr_type = MSM_CAMERA_I2C_WORD_ADDR,
 };
 
+static const struct of_device_id s5k3l1yx_dt_match[] = {
+	{.compatible = "qcom,s5k3l1yx", .data = &s5k3l1yx_s_ctrl},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, s5k3l1yx_dt_match);
+
+static struct platform_driver s5k3l1yx_platform_driver = {
+	.driver = {
+		.name = "qcom,s5k3l1yx",
+		.owner = THIS_MODULE,
+		.of_match_table = s5k3l1yx_dt_match,
+	},
+};
+
+static int32_t s5k3l1yx_platform_probe(struct platform_device *pdev)
+{
+	int32_t rc = 0;
+	const struct of_device_id *match;
+	match = of_match_device(s5k3l1yx_dt_match, &pdev->dev);
+	rc = msm_sensor_platform_probe(pdev, match->data);
+	return rc;
+}
+
 static int __init msm_sensor_init_module(void)
 {
+	int32_t rc = 0;
+	rc = platform_driver_probe(&s5k3l1yx_platform_driver,
+		s5k3l1yx_platform_probe);
+	if (!rc)
+		return rc;
 	return i2c_add_driver(&s5k3l1yx_i2c_driver);
 }
 
+static void __exit msm_sensor_exit_module(void)
+{
+	if (s5k3l1yx_s_ctrl.pdev) {
+		msm_sensor_free_sensor_data(&s5k3l1yx_s_ctrl);
+		platform_driver_unregister(&s5k3l1yx_platform_driver);
+	} else {
+		i2c_del_driver(&s5k3l1yx_i2c_driver);
+	}
+}
+
 static struct v4l2_subdev_core_ops s5k3l1yx_subdev_core_ops = {
 	.ioctl = msm_sensor_subdev_ioctl,
 	.s_power = msm_sensor_power,
@@ -693,5 +732,6 @@
 };
 
 module_init(msm_sensor_init_module);
+module_exit(msm_sensor_exit_module);
 MODULE_DESCRIPTION("Samsung 12MP Bayer sensor driver");
 MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/video/msm/server/msm_cam_server.c b/drivers/media/video/msm/server/msm_cam_server.c
index deab77b..36b9576 100644
--- a/drivers/media/video/msm/server/msm_cam_server.c
+++ b/drivers/media/video/msm/server/msm_cam_server.c
@@ -18,7 +18,6 @@
 #include "msm_ispif.h"
 #include "msm_sensor.h"
 #include "msm_actuator.h"
-#include "msm_vfe32.h"
 #include "msm_csi_register.h"
 
 #ifdef CONFIG_MSM_CAMERA_DEBUG
@@ -2034,6 +2033,21 @@
 		}
 		g_server_dev.cpp_device[index] = sd;
 		break;
+	case CCI_DEV:
+		g_server_dev.cci_device = sd;
+		if (g_server_dev.irqr_device) {
+			if (index >= MAX_NUM_CCI_DEV) {
+				pr_err("%s Invalid CCI idx %d", __func__,
+					index);
+				err = -EINVAL;
+				break;
+			}
+			cam_hw_idx = MSM_CAM_HW_CCI + index;
+			g_server_dev.subdev_table[cam_hw_idx] = sd;
+			err = msm_cam_server_fill_sdev_irqnum(MSM_CAM_HW_CCI,
+				sd_info->irq_num);
+		}
+		break;
 	default:
 		break;
 	}
diff --git a/drivers/media/video/msm/vfe/Makefile b/drivers/media/video/msm/vfe/Makefile
new file mode 100644
index 0000000..8068e4f
--- /dev/null
+++ b/drivers/media/video/msm/vfe/Makefile
@@ -0,0 +1,19 @@
+GCC_VERSION      := $(shell $(CONFIG_SHELL) $(PWD)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/video/msm/server
+ifeq ($(GCC_VERSION),0404)
+CFLAGS_REMOVE_msm_vfe8x.o = -Wframe-larger-than=1024
+endif
+ifeq ($(CONFIG_MSM_CAMERA_V4L2),y)
+  obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a_v4l2.o
+  obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31_v4l2.o
+  obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31_v4l2.o
+else
+  obj-$(CONFIG_ARCH_MSM7X27A) += msm_vfe7x27a.o
+  obj-$(CONFIG_ARCH_MSM8X60) += msm_vfe31.o
+  obj-$(CONFIG_ARCH_MSM7X30) += msm_vfe31.o
+endif
+obj-$(CONFIG_ARCH_MSM_ARM11) += msm_vfe7x.o
+obj-$(CONFIG_ARCH_QSD8X50) += msm_vfe8x.o msm_vfe8x_proc.o
+obj-$(CONFIG_ARCH_MSM8960) += msm_vfe32.o
+obj-$(CONFIG_MSM_CAMERA_V4L2) += msm_vfe_stats_buf.o
diff --git a/drivers/media/video/msm/msm_vfe31.c b/drivers/media/video/msm/vfe/msm_vfe31.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe31.c
rename to drivers/media/video/msm/vfe/msm_vfe31.c
diff --git a/drivers/media/video/msm/msm_vfe31.h b/drivers/media/video/msm/vfe/msm_vfe31.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe31.h
rename to drivers/media/video/msm/vfe/msm_vfe31.h
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.c b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
similarity index 98%
rename from drivers/media/video/msm/msm_vfe31_v4l2.c
rename to drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
index 18168ee..a3037d5 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.c
+++ b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.c
@@ -63,12 +63,6 @@
 	uint32_t		vfeInterruptStatus1;
 };
 
-/*TODO: Why is V32 reference in arch/arm/mach-msm/include/mach/camera.h?*/
-#define VFE_MSG_V31_START VFE_MSG_V32_START
-#define VFE_MSG_V31_CAPTURE VFE_MSG_V32_CAPTURE
-#define VFE_MSG_V31_JPEG_CAPTURE VFE_MSG_V32_JPEG_CAPTURE
-#define VFE_MSG_V31_START_RECORDING VFE_MSG_V32_START_RECORDING
-
 static struct vfe31_cmd_type vfe31_cmd[] = {
 /* 0*/	{VFE_CMD_DUMMY_0},
 		{VFE_CMD_SET_CLK},
@@ -417,6 +411,25 @@
 	return 0L;
 }
 
+static unsigned long vfe31_stats_unregbuf(
+	struct msm_stats_reqbuf *req_buf)
+{
+	int i = 0, rc = 0;
+
+	for (i = 0; i < req_buf->num_buf; i++) {
+		rc = vfe31_ctrl->stats_ops.buf_unprepare(
+			vfe31_ctrl->stats_ops.stats_ctrl,
+			req_buf->stats_type, i,
+			vfe31_ctrl->stats_ops.client);
+		if (rc < 0) {
+			pr_err("%s: unreg stats buf (type = %d) err = %d\n",
+				__func__, req_buf->stats_type, rc);
+			return rc;
+		}
+	}
+	return 0L;
+}
+
 static int vfe_stats_awb_buf_init(
 	struct vfe_cmd_stats_buf *in)
 {
@@ -664,6 +677,9 @@
 {
 	uint32_t *ch_info;
 	uint32_t *axi_cfg = ao + V31_AXI_RESERVED;
+	uint32_t bus_cmd = *axi_cfg;
+	int i;
+
 	/* Update the corresponding write masters for each output*/
 	ch_info = axi_cfg + V31_AXI_CFG_LEN;
 	vfe31_ctrl->outpath.out0.ch0 = 0x0000FFFF & *ch_info;
@@ -711,10 +727,23 @@
 		return -EINVAL;
 	}
 
+	axi_cfg++;
 	msm_camera_io_memcpy(vfe31_ctrl->vfebase +
 		vfe31_cmd[VFE_CMD_AXI_OUT_CFG].offset, axi_cfg,
-		vfe31_cmd[VFE_CMD_AXI_OUT_CFG].length - V31_AXI_CH_INF_LEN -
-			V31_AXI_RESERVED_LEN);
+		V31_AXI_BUS_CFG_LEN);
+	axi_cfg += V31_AXI_BUS_CFG_LEN/4;
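+	/* Program each AXI write master: one config word at its base, then 12 more bytes at offset 12 */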
+	for (i = 0; i < ARRAY_SIZE(vfe31_AXI_WM_CFG); i++) {
+		msm_camera_io_w(*axi_cfg,
+		vfe31_ctrl->vfebase+vfe31_AXI_WM_CFG[i]);
+		axi_cfg += 3;
+		msm_camera_io_memcpy(
+			vfe31_ctrl->vfebase+vfe31_AXI_WM_CFG[i]+12,
+							axi_cfg, 12);
+		axi_cfg += 3;
+	}
+	msm_camera_io_w(bus_cmd, vfe31_ctrl->vfebase +
+					V31_AXI_BUS_CMD_OFF);
+
 	return 0;
 }
 
@@ -1401,11 +1430,11 @@
 				VFE_OUTPUTS_PREVIEW))
 			/* Configure primary channel */
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_START, VFE_MSG_OUTPUT_PRIMARY);
+				VFE_MSG_START, VFE_MSG_OUTPUT_PRIMARY);
 		else
 			/* Configure secondary channel */
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_START, VFE_MSG_OUTPUT_SECONDARY);
+				VFE_MSG_START, VFE_MSG_OUTPUT_SECONDARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
 				" for preview", __func__);
@@ -1424,7 +1453,7 @@
 			rc = -EFAULT;
 			goto proc_general_done;
 		}
-		rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_CAPTURE,
+		rc = vfe31_configure_pingpong_buffers(VFE_MSG_CAPTURE,
 			VFE_MSG_OUTPUT_PRIMARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1450,12 +1479,12 @@
 			}
 			/* Configure primary channel for JPEG */
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_JPEG_CAPTURE,
+				VFE_MSG_JPEG_CAPTURE,
 				VFE_MSG_OUTPUT_PRIMARY);
 		} else {
 			/* Configure primary channel */
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_CAPTURE,
+				VFE_MSG_CAPTURE,
 				VFE_MSG_OUTPUT_PRIMARY);
 		}
 		if (rc < 0) {
@@ -1465,7 +1494,7 @@
 			goto proc_general_done;
 		}
 		/* Configure secondary channel */
-		rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_CAPTURE,
+		rc = vfe31_configure_pingpong_buffers(VFE_MSG_CAPTURE,
 			VFE_MSG_OUTPUT_SECONDARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1489,13 +1518,13 @@
 			VFE_OUTPUTS_PREVIEW_AND_VIDEO) {
 			vfe31_ctrl->outpath.out1.inst_handle = temp1;
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_START_RECORDING,
+				VFE_MSG_START_RECORDING,
 				VFE_MSG_OUTPUT_SECONDARY);
 		} else if (vfe31_ctrl->operation_mode ==
 			VFE_OUTPUTS_VIDEO_AND_PREVIEW) {
 			vfe31_ctrl->outpath.out0.inst_handle = temp1;
 			rc = vfe31_configure_pingpong_buffers(
-				VFE_MSG_V31_START_RECORDING,
+				VFE_MSG_START_RECORDING,
 				VFE_MSG_OUTPUT_PRIMARY);
 		}
 		if (rc < 0) {
@@ -1936,7 +1965,7 @@
 		}
 		vfe31_ctrl->outpath.out0.inst_handle = temp1;
 		/* Configure primary channel */
-		rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_CAPTURE,
+		rc = vfe31_configure_pingpong_buffers(VFE_MSG_CAPTURE,
 			VFE_MSG_OUTPUT_PRIMARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -2242,11 +2271,11 @@
 		break;
 
 	case VFE_CMD_ZSL:
-		rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_START,
+		rc = vfe31_configure_pingpong_buffers(VFE_MSG_START,
 			VFE_MSG_OUTPUT_PRIMARY);
 		if (rc < 0)
 			goto proc_general_done;
-		rc = vfe31_configure_pingpong_buffers(VFE_MSG_V31_START,
+		rc = vfe31_configure_pingpong_buffers(VFE_MSG_START,
 			VFE_MSG_OUTPUT_SECONDARY);
 		if (rc < 0)
 			goto proc_general_done;
@@ -3334,6 +3363,22 @@
 				vfe31_ctrl->stats_ops.client);
 	}
 	break;
+	case VFE_CMD_STATS_UNREGBUF:
+	{
+		struct msm_stats_reqbuf *req_buf = NULL;
+		req_buf = (struct msm_stats_reqbuf *)cmd->value;
+		if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+			/* error: the two lengths do not match */
+			pr_err("%s: stats reqbuf input size = %d,\n"
+				"struct size = %zu, mismatch\n",
+				__func__, cmd->length,
+				sizeof(struct msm_stats_reqbuf));
+			rc = -EINVAL;
+			goto end;
+		}
+		rc = vfe31_stats_unregbuf(req_buf);
+	}
+	break;
 	default:
 		rc = -1;
 		pr_err("%s: cmd_type %d not supported", __func__,
@@ -3583,6 +3628,7 @@
 	case VFE_CMD_STATS_REQBUF:
 	case VFE_CMD_STATS_ENQUEUEBUF:
 	case VFE_CMD_STATS_FLUSH_BUFQ:
+	case VFE_CMD_STATS_UNREGBUF:
 		/* for easy porting put in one envelope */
 		rc = vfe_stats_bufq_sub_ioctl(cmd, vfe_params->data);
 		return rc;
diff --git a/drivers/media/video/msm/msm_vfe31_v4l2.h b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.h
similarity index 99%
rename from drivers/media/video/msm/msm_vfe31_v4l2.h
rename to drivers/media/video/msm/vfe/msm_vfe31_v4l2.h
index 6396966..60db8e5 100644
--- a/drivers/media/video/msm/msm_vfe31_v4l2.h
+++ b/drivers/media/video/msm/vfe/msm_vfe31_v4l2.h
@@ -216,12 +216,13 @@
 
 #define V31_OPERATION_CFG_LEN     32
 
-#define V31_AXI_OUT_OFF           0x00000038
+#define V31_AXI_BUS_CMD_OFF       0x00000038
+#define V31_AXI_OUT_OFF           0x0000003C
 #define V31_AXI_OUT_LEN           240
-#define V31_AXI_CH_INF_LEN        48
 #define V31_AXI_CFG_LEN           47
 #define V31_AXI_RESERVED            1
 #define V31_AXI_RESERVED_LEN        4
+#define V31_AXI_BUS_CFG_LEN       16
 
 #define V31_FRAME_SKIP_OFF        0x00000504
 #define V31_FRAME_SKIP_LEN        32
diff --git a/drivers/media/video/msm/msm_vfe32.c b/drivers/media/video/msm/vfe/msm_vfe32.c
similarity index 89%
rename from drivers/media/video/msm/msm_vfe32.c
rename to drivers/media/video/msm/vfe/msm_vfe32.c
index aa2b19d..a96a945 100644
--- a/drivers/media/video/msm/msm_vfe32.c
+++ b/drivers/media/video/msm/vfe/msm_vfe32.c
@@ -216,6 +216,27 @@
 		{VFE_CMD_GET_RGB_G_TABLE},
 		{VFE_CMD_GET_LA_TABLE},
 		{VFE_CMD_DEMOSAICV3_UPDATE},
+		{VFE_CMD_ACTIVE_REGION_CFG},
+/*130*/ {VFE_CMD_COLOR_PROCESSING_CONFIG},
+		{VFE_CMD_STATS_WB_AEC_CONFIG},
+		{VFE_CMD_STATS_WB_AEC_UPDATE},
+		{VFE_CMD_Y_GAMMA_CONFIG},
+		{VFE_CMD_SCALE_OUTPUT1_CONFIG},
+/*135*/ {VFE_CMD_SCALE_OUTPUT2_CONFIG},
+		{VFE_CMD_CAPTURE_RAW},
+		{VFE_CMD_STOP_LIVESHOT},
+		{VFE_CMD_RECONFIG_VFE},
+		{VFE_CMD_STATS_REQBUF},
+/*140*/	{VFE_CMD_STATS_ENQUEUEBUF},
+		{VFE_CMD_STATS_FLUSH_BUFQ},
+		{VFE_CMD_STATS_UNREGBUF},
+		{VFE_CMD_STATS_BG_START, V32_STATS_BG_LEN, V32_STATS_BG_OFF},
+		{VFE_CMD_STATS_BG_STOP},
+		{VFE_CMD_STATS_BF_START, V32_STATS_BF_LEN, V32_STATS_BF_OFF},
+/*145*/ {VFE_CMD_STATS_BF_STOP},
+		{VFE_CMD_STATS_BHIST_START, V32_STATS_BHIST_LEN,
+			V32_STATS_BHIST_OFF},
+/*147*/	{VFE_CMD_STATS_BHIST_STOP},
 };
 
 uint32_t vfe32_AXI_WM_CFG[] = {
@@ -358,8 +379,39 @@
 	"GET_RGB_G_TABLE",
 	"GET_LA_TABLE",
 	"DEMOSAICV3_UPDATE",
+	"DUMMY_11",
+	"DUMMY_12", /*130*/
+	"DUMMY_13",
+	"DUMMY_14",
+	"DUMMY_15",
+	"DUMMY_16",
+	"DUMMY_17", /*135*/
+	"DUMMY_18",
+	"DUMMY_19",
+	"DUMMY_20",
+	"STATS_REQBUF",
+	"STATS_ENQUEUEBUF", /*140*/
+	"STATS_FLUSH_BUFQ",
+	"STATS_UNREGBUF",
+	"STATS_BG_START",
+	"STATS_BG_STOP",
+	"STATS_BF_START", /*145*/
+	"STATS_BF_STOP",
+	"STATS_BHIST_START",
+	"STATS_BHIST_STOP",
+	"RESET_2",
 };
 
+uint8_t vfe32_use_bayer_stats(struct vfe32_ctrl_type *vfe32_ctrl)
+{
+	if (vfe32_ctrl->ver_num.main >= 4) {
+		/* VFE 4 or above uses bayer stats */
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
+
 static void vfe32_stop(struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	unsigned long flags;
@@ -375,7 +427,7 @@
 	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_0);
 	msm_camera_io_w(VFE_DISABLE_ALL_IRQS,
-			vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
+		vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_MASK_1);
 
 	/* clear all pending interrupts*/
 	msm_camera_io_w(VFE_CLEAR_ALL_IRQS,
@@ -417,6 +469,8 @@
 	uint32_t *ch_info;
 	uint32_t *axi_cfg = ao+V32_AXI_BUS_FMT_OFF;
 	int vfe_mode = (mode & ~(OUTPUT_TERT1|OUTPUT_TERT2));
+	uint32_t bus_cmd = *axi_cfg;
+	int i;
 
 	/* Update the corresponding write masters for each output*/
 	ch_info = axi_cfg + V32_AXI_CFG_LEN;
@@ -491,10 +545,23 @@
 bus_cfg:
 	msm_camera_io_w(*ao, axi_ctrl->share_ctrl->vfebase +
 		VFE_BUS_IO_FORMAT_CFG);
+	axi_cfg++;
 	msm_camera_io_memcpy(axi_ctrl->share_ctrl->vfebase +
 		vfe32_cmd[VFE_CMD_AXI_OUT_CFG].offset, axi_cfg,
-		vfe32_cmd[VFE_CMD_AXI_OUT_CFG].length - V32_AXI_CH_INF_LEN
-		- V32_AXI_BUS_FMT_LEN);
+		V32_AXI_BUS_CFG_LEN);
+	axi_cfg += V32_AXI_BUS_CFG_LEN/4;
+	for (i = 0; i < ARRAY_SIZE(vfe32_AXI_WM_CFG); i++) {
+		msm_camera_io_w(*axi_cfg,
+			axi_ctrl->share_ctrl->vfebase+vfe32_AXI_WM_CFG[i]);
+		axi_cfg += 3;
+		msm_camera_io_memcpy(
+			axi_ctrl->share_ctrl->vfebase+vfe32_AXI_WM_CFG[i]+12,
+								axi_cfg, 12);
+		axi_cfg += 3;
+	}
+	/* TODO: Only reload for 1st axi config for concurrent case */
+	msm_camera_io_w(bus_cmd, axi_ctrl->share_ctrl->vfebase +
+					V32_AXI_BUS_CMD_OFF);
 	return 0;
 }
 
@@ -511,7 +578,7 @@
 	vfe32_ctrl->share_ctrl->stop_ack_pending  = FALSE;
 	spin_unlock_irqrestore(&vfe32_ctrl->share_ctrl->stop_flag_lock, flags);
 
-	vfe32_ctrl->reset_ack_pending  = FALSE;
+	init_completion(&vfe32_ctrl->reset_complete);
 
 	spin_lock_irqsave(&vfe32_ctrl->update_ack_lock, flags);
 	vfe32_ctrl->update_ack_pending = FALSE;
@@ -530,13 +597,16 @@
 	/* this is unsigned 32 bit integer. */
 	vfe32_ctrl->share_ctrl->vfeFrameId = 0;
 	/* Stats control variables. */
-	memset(&(vfe32_ctrl->afStatsControl), 0,
+	memset(&(vfe32_ctrl->afbfStatsControl), 0,
 		sizeof(struct vfe_stats_control));
 
 	memset(&(vfe32_ctrl->awbStatsControl), 0,
 		sizeof(struct vfe_stats_control));
 
-	memset(&(vfe32_ctrl->aecStatsControl), 0,
+	memset(&(vfe32_ctrl->aecbgStatsControl), 0,
+		sizeof(struct vfe_stats_control));
+
+	memset(&(vfe32_ctrl->bhistStatsControl), 0,
 		sizeof(struct vfe_stats_control));
 
 	memset(&(vfe32_ctrl->ihistStatsControl), 0,
@@ -553,7 +623,63 @@
 	vfe32_ctrl->snapshot_frame_cnt = 0;
 }
 
-static void vfe32_reset(struct vfe32_ctrl_type *vfe32_ctrl)
+static void vfe32_program_dmi_cfg(
+	enum VFE32_DMI_RAM_SEL bankSel,
+	struct vfe32_ctrl_type *vfe32_ctrl)
+{
+	/* set bit 8 for auto increment. */
+	uint32_t value = VFE_DMI_CFG_DEFAULT;
+	value += (uint32_t)bankSel;
+	CDBG("%s: banksel = %d\n", __func__, bankSel);
+
+	msm_camera_io_w(value, vfe32_ctrl->share_ctrl->vfebase +
+		VFE_DMI_CFG);
+	/* by default, always starts with offset 0.*/
+	msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+		VFE_DMI_ADDR);
+}
+
+static void vfe32_reset_dmi_tables(
+	struct vfe32_ctrl_type *vfe32_ctrl)
+{
+	int i = 0;
+
+	/* Reset Histogram LUTs */
+	CDBG("Reset Bayer histogram LUT : 0\n");
+	vfe32_program_dmi_cfg(STATS_BHIST_RAM0, vfe32_ctrl);
+	/* Loop for configuring LUT */
+	for (i = 0; i < 256; i++) {
+		msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_DMI_DATA_HI);
+		msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_DMI_DATA_LO);
+	}
+	vfe32_program_dmi_cfg(NO_MEM_SELECTED, vfe32_ctrl);
+
+	CDBG("Reset Bayer Histogram LUT: 1\n");
+	vfe32_program_dmi_cfg(STATS_BHIST_RAM1, vfe32_ctrl);
+	/* Loop for configuring LUT */
+	for (i = 0; i < 256; i++) {
+		msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_DMI_DATA_HI);
+		msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_DMI_DATA_LO);
+	}
+	vfe32_program_dmi_cfg(NO_MEM_SELECTED, vfe32_ctrl);
+
+	CDBG("Reset IHistogram LUT\n");
+	vfe32_program_dmi_cfg(STATS_IHIST_RAM, vfe32_ctrl);
+	/* Loop for configuring LUT */
+	for (i = 0; i < 256; i++) {
+		msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_DMI_DATA_HI);
+		msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_DMI_DATA_LO);
+	}
+	vfe32_program_dmi_cfg(NO_MEM_SELECTED, vfe32_ctrl);
+}
+
+static int vfe32_reset(struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	vfe32_reset_internal_variables(vfe32_ctrl);
 	/* disable all interrupts.  vfeImaskLocal is also reset to 0
@@ -572,7 +698,8 @@
 
 	/* Ensure the write order while writing
 	to the command register using the barrier */
-	msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase + VFE_IRQ_CMD);
+	msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
+		VFE_IRQ_CMD);
 
 	/* enable reset_ack interrupt.  */
 	msm_camera_io_w(VFE_IMASK_WHILE_STOPPING_1,
@@ -587,6 +714,12 @@
 	to the command register using the barrier */
 	msm_camera_io_w_mb(VFE_RESET_UPON_RESET_CMD,
 		vfe32_ctrl->share_ctrl->vfebase + VFE_GLOBAL_RESET);
+
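+	/* A blocking reset (VFE_CMD_RESET_2) waits here for the reset-ack IRQ to signal completion */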
+	if (vfe32_ctrl->is_reset_blocking)
+		return wait_for_completion_interruptible(
+				&vfe32_ctrl->reset_complete);
+	else
+		return 0;
 }
 
 static int vfe32_operation_config(uint32_t *cmd,
@@ -677,6 +810,26 @@
 	return 0L;
 }
 
+
+static unsigned long vfe32_stats_unregbuf(
+	struct vfe32_ctrl_type *vfe32_ctrl,
+	struct msm_stats_reqbuf *req_buf)
+{
+	int i = 0, rc = 0;
+
+	for (i = 0; i < req_buf->num_buf; i++) {
+		rc = vfe32_ctrl->stats_ops.buf_unprepare(
+			vfe32_ctrl->stats_ops.stats_ctrl,
+			req_buf->stats_type, i,
+			vfe32_ctrl->stats_ops.client);
+		if (rc < 0) {
+			pr_err("%s: unreg stats buf (type = %d) err = %d\n",
+				__func__, req_buf->stats_type, rc);
+			return rc;
+		}
+	}
+	return 0L;
+}
 static int vfe_stats_awb_buf_init(
 	struct vfe32_ctrl_type *vfe32_ctrl,
 	struct vfe_cmd_stats_buf *in)
@@ -708,14 +861,18 @@
 	return 0;
 }
 
-static int vfe_stats_aec_buf_init(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
+static uint32_t vfe_stats_aec_bg_buf_init(
+	struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	uint32_t addr;
 	unsigned long flags;
+	uint32_t stats_type;
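+	/* Pre-bayer VFE uses the legacy AEC stats type; VFE4 and later use the BG stats type */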
 
+	stats_type =
+		(!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AEC
+			: MSM_STATS_TYPE_BG;
 	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
-	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
 	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
 	if (!addr) {
 		pr_err("%s: dq aec ping buf from free buf queue",
@@ -724,9 +881,9 @@
 	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
-		VFE_BUS_STATS_AEC_WR_PING_ADDR);
+		VFE_BUS_STATS_AEC_BG_WR_PING_ADDR);
 	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
-	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
 	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
 	if (!addr) {
 		pr_err("%s: dq aec pong buf from free buf queue",
@@ -735,26 +892,31 @@
 	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
-		VFE_BUS_STATS_AEC_WR_PONG_ADDR);
+		VFE_BUS_STATS_AEC_BG_WR_PONG_ADDR);
 	return 0;
 }
 
-static int vfe_stats_af_buf_init(
-	struct vfe32_ctrl_type *vfe32_ctrl, struct vfe_cmd_stats_buf *in)
+static int vfe_stats_af_bf_buf_init(
+	struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	uint32_t addr;
 	unsigned long flags;
 	int rc = 0;
 
+	uint32_t stats_type;
+	stats_type =
+		(!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+			: MSM_STATS_TYPE_BF;
+
 	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
-	rc = vfe32_stats_flush_enqueue(vfe32_ctrl, MSM_STATS_TYPE_AF);
+	rc = vfe32_stats_flush_enqueue(vfe32_ctrl, stats_type);
 	if (rc < 0) {
 		pr_err("%s: dq stats buf err = %d",
 			   __func__, rc);
 		spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
 		return -EINVAL;
 	}
-	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
 	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
 	if (!addr) {
 		pr_err("%s: dq af ping buf from free buf queue", __func__);
@@ -762,9 +924,9 @@
 	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
-		VFE_BUS_STATS_AF_WR_PING_ADDR);
+		VFE_BUS_STATS_AF_BF_WR_PING_ADDR);
 	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
-	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
 	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
 	if (!addr) {
 		pr_err("%s: dq af pong buf from free buf queue", __func__);
@@ -772,14 +934,44 @@
 	}
 	msm_camera_io_w(addr,
 		vfe32_ctrl->share_ctrl->vfebase +
-		VFE_BUS_STATS_AF_WR_PONG_ADDR);
+		VFE_BUS_STATS_AF_BF_WR_PONG_ADDR);
+	return 0;
+}
+
+static uint32_t vfe_stats_bhist_buf_init(
+	struct vfe32_ctrl_type *vfe32_ctrl)
+{
+	uint32_t addr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_BHIST);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq bhist ping buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe32_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_SKIN_BHIST_WR_PING_ADDR);
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_BHIST);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (!addr) {
+		pr_err("%s: dq bhist pong buf from free buf queue",
+			__func__);
+		return -ENOMEM;
+	}
+	msm_camera_io_w(addr,
+		vfe32_ctrl->share_ctrl->vfebase +
+		VFE_BUS_STATS_SKIN_BHIST_WR_PONG_ADDR);
 
 	return 0;
 }
 
 static int vfe_stats_ihist_buf_init(
-	struct vfe32_ctrl_type *vfe32_ctrl,
-	struct vfe_cmd_stats_buf *in)
+	struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	uint32_t addr;
 	unsigned long flags;
@@ -811,8 +1003,7 @@
 }
 
 static int vfe_stats_rs_buf_init(
-	struct vfe32_ctrl_type *vfe32_ctrl,
-	struct vfe_cmd_stats_buf *in)
+	struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	uint32_t addr;
 	unsigned long flags;
@@ -841,8 +1032,7 @@
 }
 
 static int vfe_stats_cs_buf_init(
-	struct vfe32_ctrl_type *vfe32_ctrl,
-	struct vfe_cmd_stats_buf *in)
+	struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	uint32_t addr;
 	unsigned long flags;
@@ -918,6 +1108,9 @@
 		msm_camera_io_w_mb(1, vfe32_ctrl->share_ctrl->vfebase +
 			VFE_CAMIF_COMMAND);
 	}
+	msm_camera_io_dump(vfe32_ctrl->share_ctrl->vfebase,
+		vfe32_ctrl->share_ctrl->register_total * 4);
+
 	/* Ensure the write order while writing
 	to the command register using the barrier */
 	atomic_set(&vfe32_ctrl->share_ctrl->vstate, 1);
@@ -1372,19 +1565,7 @@
 		vfe32_ctrl->share_ctrl->vfebase + V32_TIMER_SELECT_OFF);
 }
 
-static void vfe32_program_dmi_cfg(
-	enum VFE32_DMI_RAM_SEL bankSel,
-	struct vfe32_ctrl_type *vfe32_ctrl)
-{
-	/* set bit 8 for auto increment. */
-	uint32_t value = VFE_DMI_CFG_DEFAULT;
-	value += (uint32_t)bankSel;
-	CDBG("%s: banksel = %d\n", __func__, bankSel);
 
-	msm_camera_io_w(value, vfe32_ctrl->share_ctrl->vfebase + VFE_DMI_CFG);
-	/* by default, always starts with offset 0.*/
-	msm_camera_io_w(0, vfe32_ctrl->share_ctrl->vfebase + VFE_DMI_ADDR);
-}
 static void vfe32_write_gamma_cfg(
 	enum VFE32_DMI_RAM_SEL channel_sel,
 	const uint32_t *tbl,
@@ -1582,7 +1763,7 @@
 	struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	int i , rc = 0;
-	uint32_t old_val = 0 , new_val = 0;
+	uint32_t old_val = 0 , new_val = 0, module_val = 0;
 	uint32_t *cmdp = NULL;
 	uint32_t *cmdp_local = NULL;
 	uint32_t snapshot_cnt = 0;
@@ -1595,6 +1776,7 @@
 	case VFE_CMD_RESET:
 		pr_info("vfe32_proc_general: cmdID = %s\n",
 			vfe32_general_cmd[cmd->id]);
+		vfe32_ctrl->is_reset_blocking = false;
 		vfe32_reset(vfe32_ctrl);
 		break;
 	case VFE_CMD_START:
@@ -1609,25 +1791,25 @@
 				VFE_OUTPUTS_PREVIEW))
 				/* Configure primary channel */
 				rc = vfe32_configure_pingpong_buffers(
-					VFE_MSG_V32_START,
+					VFE_MSG_START,
 					VFE_MSG_OUTPUT_PRIMARY,
 					vfe32_ctrl);
 			else
 			/* Configure secondary channel */
 				rc = vfe32_configure_pingpong_buffers(
-					VFE_MSG_V32_START,
+					VFE_MSG_START,
 					VFE_MSG_OUTPUT_SECONDARY,
 					vfe32_ctrl);
 		}
 		if (vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_RDI0)
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_START, VFE_MSG_OUTPUT_TERTIARY1,
+				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY1,
 				vfe32_ctrl);
 		if (vfe32_ctrl->share_ctrl->operation_mode &
 				VFE_OUTPUTS_RDI1)
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_START, VFE_MSG_OUTPUT_TERTIARY2,
+				VFE_MSG_START, VFE_MSG_OUTPUT_TERTIARY2,
 				vfe32_ctrl);
 
 		if (rc < 0) {
@@ -1649,7 +1831,7 @@
 			goto proc_general_done;
 		}
 		rc = vfe32_configure_pingpong_buffers(
-			VFE_MSG_V32_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
+			VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_PRIMARY,
 			vfe32_ctrl);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1677,13 +1859,13 @@
 			}
 			/* Configure primary channel for JPEG */
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_JPEG_CAPTURE,
+				VFE_MSG_JPEG_CAPTURE,
 				VFE_MSG_OUTPUT_PRIMARY,
 				vfe32_ctrl);
 		} else {
 			/* Configure primary channel */
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_CAPTURE,
+				VFE_MSG_CAPTURE,
 				VFE_MSG_OUTPUT_PRIMARY,
 				vfe32_ctrl);
 		}
@@ -1695,7 +1877,7 @@
 		}
 		/* Configure secondary channel */
 		rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
+				VFE_MSG_CAPTURE, VFE_MSG_OUTPUT_SECONDARY,
 				vfe32_ctrl);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1720,7 +1902,7 @@
 			vfe32_ctrl->share_ctrl->outpath.out1.inst_handle =
 				temp1;
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_START_RECORDING,
+				VFE_MSG_START_RECORDING,
 				VFE_MSG_OUTPUT_SECONDARY,
 				vfe32_ctrl);
 		} else if (vfe32_ctrl->share_ctrl->operation_mode &
@@ -1728,7 +1910,7 @@
 			vfe32_ctrl->share_ctrl->outpath.out0.inst_handle =
 				temp1;
 			rc = vfe32_configure_pingpong_buffers(
-				VFE_MSG_V32_START_RECORDING,
+				VFE_MSG_START_RECORDING,
 				VFE_MSG_OUTPUT_PRIMARY,
 				vfe32_ctrl);
 		}
@@ -1762,7 +1944,12 @@
 		break;
 
 	case VFE_CMD_STATS_AE_START: {
-		rc = vfe_stats_aec_buf_init(vfe32_ctrl, NULL);
+		if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+			/* Error */
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		rc = vfe_stats_aec_bg_buf_init(vfe32_ctrl);
 		if (rc < 0) {
 			pr_err("%s: cannot config ping/pong address of AEC",
 				 __func__);
@@ -1791,7 +1978,12 @@
 		}
 		break;
 	case VFE_CMD_STATS_AF_START: {
-		rc = vfe_stats_af_buf_init(vfe32_ctrl, NULL);
+		if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+			/* Error */
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		rc = vfe_stats_af_bf_buf_init(vfe32_ctrl);
 		if (rc < 0) {
 			pr_err("%s: cannot config ping/pong address of AF",
 				__func__);
@@ -1820,6 +2012,11 @@
 		}
 		break;
 	case VFE_CMD_STATS_AWB_START: {
+		if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+			/* Error */
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
 		rc = vfe_stats_awb_buf_init(vfe32_ctrl, NULL);
 		if (rc < 0) {
 			pr_err("%s: cannot config ping/pong address of AWB",
@@ -1850,7 +2047,7 @@
 		break;
 
 	case VFE_CMD_STATS_IHIST_START: {
-		rc = vfe_stats_ihist_buf_init(vfe32_ctrl, NULL);
+		rc = vfe_stats_ihist_buf_init(vfe32_ctrl);
 		if (rc < 0) {
 			pr_err("%s: cannot config ping/pong address of IHIST",
 				 __func__);
@@ -1881,7 +2078,7 @@
 
 
 	case VFE_CMD_STATS_RS_START: {
-		rc = vfe_stats_rs_buf_init(vfe32_ctrl, NULL);
+		rc = vfe_stats_rs_buf_init(vfe32_ctrl);
 		if (rc < 0) {
 			pr_err("%s: cannot config ping/pong address of RS",
 				__func__);
@@ -1906,7 +2103,7 @@
 		break;
 
 	case VFE_CMD_STATS_CS_START: {
-		rc = vfe_stats_cs_buf_init(vfe32_ctrl, NULL);
+		rc = vfe_stats_cs_buf_init(vfe32_ctrl);
 		if (rc < 0) {
 			pr_err("%s: cannot config ping/pong address of CS",
 				__func__);
@@ -1930,6 +2127,67 @@
 		}
 		break;
 
+	case VFE_CMD_STATS_BG_START:
+	case VFE_CMD_STATS_BF_START:
+	case VFE_CMD_STATS_BHIST_START: {
+		if (!vfe32_use_bayer_stats(vfe32_ctrl)) {
+			/* Error */
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_camera_io_r(
+			vfe32_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+		module_val = msm_camera_io_r(
+			vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		if (VFE_CMD_STATS_BG_START == cmd->id) {
+			module_val |= AE_BG_ENABLE_MASK;
+			old_val |= STATS_BG_ENABLE_MASK;
+			rc = vfe_stats_aec_bg_buf_init(vfe32_ctrl);
+			if (rc < 0) {
+				pr_err("%s: cannot config ping/pong address of BG",
+					__func__);
+				goto proc_general_done;
+			}
+		} else if (VFE_CMD_STATS_BF_START == cmd->id) {
+			module_val |= AF_BF_ENABLE_MASK;
+			old_val |= STATS_BF_ENABLE_MASK;
+			rc = vfe_stats_af_bf_buf_init(vfe32_ctrl);
+			if (rc < 0) {
+				pr_err("%s: cannot config ping/pong address of BF",
+					__func__);
+				goto proc_general_done;
+			}
+		} else {
+			module_val |= SKIN_BHIST_ENABLE_MASK;
+			old_val |= STATS_BHIST_ENABLE_MASK;
+			rc = vfe_stats_bhist_buf_init(vfe32_ctrl);
+			if (rc < 0) {
+				pr_err("%s: cannot config ping/pong address of BHIST",
+					__func__);
+				goto proc_general_done;
+			}
+		}
+		msm_camera_io_w(old_val, vfe32_ctrl->share_ctrl->vfebase +
+			VFE_STATS_CFG);
+		msm_camera_io_w(module_val,
+			vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
+		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
+		if (!cmdp) {
+			rc = -ENOMEM;
+			goto proc_general_done;
+		}
+		if (copy_from_user(cmdp,
+				(void __user *)(cmd->value),
+				cmd->length)) {
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		msm_camera_io_memcpy(
+			vfe32_ctrl->share_ctrl->vfebase +
+			vfe32_cmd[cmd->id].offset,
+			cmdp, (vfe32_cmd[cmd->id].length));
+		}
+		break;
 	case VFE_CMD_MCE_UPDATE:
 	case VFE_CMD_MCE_CFG:{
 		cmdp = kmalloc(cmd->length, GFP_ATOMIC);
@@ -2212,7 +2470,7 @@
 		}
 		vfe32_ctrl->share_ctrl->outpath.out0.inst_handle = temp1;
 		/* Configure primary channel */
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_V32_CAPTURE,
+		rc = vfe32_configure_pingpong_buffers(VFE_MSG_CAPTURE,
 					VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -2601,6 +2859,11 @@
 		break;
 
 	case VFE_CMD_STATS_AWB_STOP: {
+		if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+			/* Error */
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
 		old_val = msm_camera_io_r(
 			vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
 		old_val &= ~AWB_ENABLE_MASK;
@@ -2609,6 +2872,11 @@
 		}
 		break;
 	case VFE_CMD_STATS_AE_STOP: {
+		if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+			/* Error */
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
 		old_val = msm_camera_io_r(
 			vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
 		old_val &= ~AE_BG_ENABLE_MASK;
@@ -2617,17 +2885,16 @@
 		}
 		break;
 	case VFE_CMD_STATS_AF_STOP: {
+		if (vfe32_use_bayer_stats(vfe32_ctrl)) {
+			/* Error */
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
 		old_val = msm_camera_io_r(
 			vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
 		old_val &= ~AF_BF_ENABLE_MASK;
 		msm_camera_io_w(old_val,
 			vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
-		rc = vfe32_stats_flush_enqueue(vfe32_ctrl, MSM_STATS_TYPE_AF);
-		if (rc < 0) {
-			pr_err("%s: dq stats buf err = %d",
-				   __func__, rc);
-			return -EINVAL;
-		}
 		}
 		break;
 
@@ -2657,6 +2924,37 @@
 			vfe32_ctrl->share_ctrl->vfebase + VFE_MODULE_CFG);
 		}
 		break;
+
+	case VFE_CMD_STATS_BG_STOP:
+	case VFE_CMD_STATS_BF_STOP:
+	case VFE_CMD_STATS_BHIST_STOP: {
+		if (!vfe32_use_bayer_stats(vfe32_ctrl)) {
+			/* Error */
+			rc = -EFAULT;
+			goto proc_general_done;
+		}
+		old_val = msm_camera_io_r(
+			vfe32_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+		if (VFE_CMD_STATS_BG_STOP == cmd->id)
+			old_val &= ~STATS_BG_ENABLE_MASK;
+		else if (VFE_CMD_STATS_BF_STOP == cmd->id)
+			old_val &= ~STATS_BF_ENABLE_MASK;
+		else
+			old_val &= ~STATS_BHIST_ENABLE_MASK;
+		msm_camera_io_w(old_val,
+			vfe32_ctrl->share_ctrl->vfebase + VFE_STATS_CFG);
+		if (VFE_CMD_STATS_BF_STOP == cmd->id) {
+			rc = vfe32_stats_flush_enqueue(vfe32_ctrl,
+					MSM_STATS_TYPE_BF);
+			if (rc < 0) {
+				pr_err("%s: dq stats buf err = %d",
+					   __func__, rc);
+				return -EINVAL;
+			}
+		}
+		}
+		break;
+
 	case VFE_CMD_STOP:
 		pr_info("vfe32_proc_general: cmdID = %s\n",
 			vfe32_general_cmd[cmd->id]);
@@ -2703,11 +3001,11 @@
 		break;
 
 	case VFE_CMD_ZSL:
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_V32_START,
+		rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
 			VFE_MSG_OUTPUT_PRIMARY, vfe32_ctrl);
 		if (rc < 0)
 			goto proc_general_done;
-		rc = vfe32_configure_pingpong_buffers(VFE_MSG_V32_START,
+		rc = vfe32_configure_pingpong_buffers(VFE_MSG_START,
 			VFE_MSG_OUTPUT_SECONDARY, vfe32_ctrl);
 		if (rc < 0)
 			goto proc_general_done;
@@ -2975,6 +3273,12 @@
 		CDBG("%s Stopping liveshot ", __func__);
 		vfe32_stop_liveshot(pmctl, vfe32_ctrl);
 		break;
+	case VFE_CMD_RESET_2:
+		CDBG("vfe32_proc_general: cmdID = %s\n",
+			vfe32_general_cmd[cmd->id]);
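+		/* blocking reset: the reset IRQ handler completes
+		 * reset_complete instead of sending MSG_ID_RESET_ACK */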
+		vfe32_ctrl->is_reset_blocking = true;
+		vfe32_reset(vfe32_ctrl);
+		break;
 	default:
 		if (cmd->length != vfe32_cmd[cmd->id].length)
 			return -EINVAL;
@@ -3301,18 +3605,48 @@
 		vfe32_ctrl->share_ctrl->vfebase + VFE_CLAMP_MAX);
 
 	/* stats UB config */
-	msm_camera_io_w(0x3980007,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AEC_UB_CFG);
-	msm_camera_io_w(0x3A00007,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AF_UB_CFG);
-	msm_camera_io_w(0x3A8000F,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_AWB_UB_CFG);
-	msm_camera_io_w(0x3B80007,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_RS_UB_CFG);
-	msm_camera_io_w(0x3C0001F,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_CS_UB_CFG);
-	msm_camera_io_w(0x3E0001F,
-		vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_STATS_HIST_UB_CFG);
+	CDBG("%s: Use bayer stats = %d\n", __func__,
+		 vfe32_use_bayer_stats(vfe32_ctrl));
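+	/* the stats UB layout differs between the legacy and bayer stats
+	 * paths, so each path programs its own set of UB config words */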
+	if (!vfe32_use_bayer_stats(vfe32_ctrl)) {
+		msm_camera_io_w(0x3980007,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_AEC_BG_UB_CFG);
+		msm_camera_io_w(0x3A00007,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_AF_BF_UB_CFG);
+		msm_camera_io_w(0x3A8000F,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_AWB_UB_CFG);
+		msm_camera_io_w(0x3B80007,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_RS_UB_CFG);
+		msm_camera_io_w(0x3C0001F,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_CS_UB_CFG);
+		msm_camera_io_w(0x3E0001F,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_HIST_UB_CFG);
+	} else {
+		msm_camera_io_w(0x350001F,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_HIST_UB_CFG);
+		msm_camera_io_w(0x370002F,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_AEC_BG_UB_CFG);
+		msm_camera_io_w(0x3A0002F,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_AF_BF_UB_CFG);
+		msm_camera_io_w(0x3D00007,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_RS_UB_CFG);
+		msm_camera_io_w(0x3D8001F,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_CS_UB_CFG);
+		msm_camera_io_w(0x3F80007,
+			vfe32_ctrl->share_ctrl->vfebase +
+				VFE_BUS_STATS_SKIN_BHIST_UB_CFG);
+	}
+	vfe32_reset_dmi_tables(vfe32_ctrl);
 }
 
 static void vfe32_process_reset_irq(
@@ -3338,8 +3672,12 @@
 		/* reload all write masters. (frame & line)*/
 		msm_camera_io_w(0x7FFF,
 			vfe32_ctrl->share_ctrl->vfebase + VFE_BUS_CMD);
-		vfe32_send_isp_msg(&vfe32_ctrl->subdev,
-			vfe32_ctrl->share_ctrl->vfeFrameId, MSG_ID_RESET_ACK);
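+		/* a blocking reset (VFE_CMD_RESET_2) is signalled through
+		 * reset_complete; otherwise notify with MSG_ID_RESET_ACK */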
+		if (vfe32_ctrl->is_reset_blocking)
+			complete(&vfe32_ctrl->reset_complete);
+		else
+			vfe32_send_isp_msg(&vfe32_ctrl->subdev,
+				vfe32_ctrl->share_ctrl->vfeFrameId,
+				MSG_ID_RESET_ACK);
 	}
 }
 
@@ -3785,25 +4123,36 @@
 	/* @todo This is causing issues, need further investigate */
 	/* spin_lock_irqsave(&ctrl->state_lock, flags); */
 	struct isp_msg_stats msgStats;
+	uint32_t stats_type;
 	msgStats.frameCounter = vfe32_ctrl->share_ctrl->vfeFrameId;
 	if (vfe32_ctrl->simultaneous_sof_stat)
 		msgStats.frameCounter--;
 	msgStats.buffer = bufAddress;
 	switch (statsNum) {
 	case statsAeNum:{
-		msgStats.id = MSG_ID_STATS_AEC;
+		msgStats.id =
+			(!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSG_ID_STATS_AEC
+				: MSG_ID_STATS_BG;
+		stats_type =
+			(!vfe32_use_bayer_stats(vfe32_ctrl)) ?
+				MSM_STATS_TYPE_AEC : MSM_STATS_TYPE_BG;
 		rc = vfe32_ctrl->stats_ops.dispatch(
 				vfe32_ctrl->stats_ops.stats_ctrl,
-				MSM_STATS_TYPE_AEC, bufAddress,
+				stats_type, bufAddress,
 				&msgStats.buf_idx, &vaddr, &msgStats.fd,
 				vfe32_ctrl->stats_ops.client);
 		}
 		break;
 	case statsAfNum:{
-		msgStats.id = MSG_ID_STATS_AF;
+		msgStats.id =
+			(!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSG_ID_STATS_AF
+				: MSG_ID_STATS_BF;
+		stats_type =
+			(!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+				: MSM_STATS_TYPE_BF;
 		rc = vfe32_ctrl->stats_ops.dispatch(
 				vfe32_ctrl->stats_ops.stats_ctrl,
-				MSM_STATS_TYPE_AF, bufAddress,
+				stats_type, bufAddress,
 				&msgStats.buf_idx, &vaddr, &msgStats.fd,
 				vfe32_ctrl->stats_ops.client);
 		}
@@ -3845,6 +4194,15 @@
 				vfe32_ctrl->stats_ops.client);
 		}
 		break;
+	case statsSkinNum: {
+		msgStats.id = MSG_ID_STATS_BHIST;
+		rc = vfe32_ctrl->stats_ops.dispatch(
+				vfe32_ctrl->stats_ops.stats_ctrl,
+				MSM_STATS_TYPE_BHIST, bufAddress,
+				&msgStats.buf_idx, &vaddr, &msgStats.fd,
+				vfe32_ctrl->stats_ops.client);
+		}
+		break;
 
 	default:
 		goto stats_done;
@@ -3875,9 +4233,9 @@
 
 	msgStats.status_bits = status_bits;
 
-	msgStats.aec.buff = vfe32_ctrl->aecStatsControl.bufToRender;
+	msgStats.aec.buff = vfe32_ctrl->aecbgStatsControl.bufToRender;
 	msgStats.awb.buff = vfe32_ctrl->awbStatsControl.bufToRender;
-	msgStats.af.buff = vfe32_ctrl->afStatsControl.bufToRender;
+	msgStats.af.buff = vfe32_ctrl->afbfStatsControl.bufToRender;
 
 	msgStats.ihist.buff = vfe32_ctrl->ihistStatsControl.bufToRender;
 	msgStats.rs.buff = vfe32_ctrl->rsStatsControl.bufToRender;
@@ -3892,24 +4250,28 @@
 				&msgStats);
 }
 
-static void vfe32_process_stats_ae_irq(struct vfe32_ctrl_type *vfe32_ctrl)
+static void vfe32_process_stats_ae_bg_irq(struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	unsigned long flags;
 	uint32_t addr;
+	uint32_t stats_type;
+	stats_type =
+		(!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AEC
+			: MSM_STATS_TYPE_BG;
 	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
-	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AEC);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
 	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
 	if (addr) {
-		vfe32_ctrl->aecStatsControl.bufToRender =
+		vfe32_ctrl->aecbgStatsControl.bufToRender =
 			vfe32_process_stats_irq_common(vfe32_ctrl, statsAeNum,
 			addr);
 
 		vfe_send_stats_msg(vfe32_ctrl,
-			vfe32_ctrl->aecStatsControl.bufToRender, statsAeNum);
+			vfe32_ctrl->aecbgStatsControl.bufToRender, statsAeNum);
 	} else{
-		vfe32_ctrl->aecStatsControl.droppedStatsFrameCount++;
+		vfe32_ctrl->aecbgStatsControl.droppedStatsFrameCount++;
 		CDBG("%s: droppedStatsFrameCount = %d", __func__,
-			vfe32_ctrl->aecStatsControl.droppedStatsFrameCount);
+			vfe32_ctrl->aecbgStatsControl.droppedStatsFrameCount);
 	}
 }
 
@@ -3934,24 +4296,50 @@
 	}
 }
 
-static void vfe32_process_stats_af_irq(struct vfe32_ctrl_type *vfe32_ctrl)
+static void vfe32_process_stats_af_bf_irq(struct vfe32_ctrl_type *vfe32_ctrl)
 {
 	unsigned long flags;
 	uint32_t addr;
+	uint32_t stats_type;
+	stats_type =
+		(!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+			: MSM_STATS_TYPE_BF;
 	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
-	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_AF);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, stats_type);
 	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
 	if (addr) {
-		vfe32_ctrl->afStatsControl.bufToRender =
+		vfe32_ctrl->afbfStatsControl.bufToRender =
 			vfe32_process_stats_irq_common(vfe32_ctrl, statsAfNum,
 			addr);
 
 		vfe_send_stats_msg(vfe32_ctrl,
-			vfe32_ctrl->afStatsControl.bufToRender, statsAfNum);
+			vfe32_ctrl->afbfStatsControl.bufToRender, statsAfNum);
 	} else{
-		vfe32_ctrl->afStatsControl.droppedStatsFrameCount++;
+		vfe32_ctrl->afbfStatsControl.droppedStatsFrameCount++;
 		CDBG("%s: droppedStatsFrameCount = %d", __func__,
-			vfe32_ctrl->afStatsControl.droppedStatsFrameCount);
+			vfe32_ctrl->afbfStatsControl.droppedStatsFrameCount);
+	}
+}
+
+static void vfe32_process_stats_bhist_irq(struct vfe32_ctrl_type *vfe32_ctrl)
+{
+	unsigned long flags;
+	uint32_t addr;
+	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
+	addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl, MSM_STATS_TYPE_BHIST);
+	spin_unlock_irqrestore(&vfe32_ctrl->stats_bufq_lock, flags);
+	if (addr) {
+		vfe32_ctrl->bhistStatsControl.bufToRender =
+			vfe32_process_stats_irq_common(vfe32_ctrl,
+				statsSkinNum, addr);
+
+		vfe_send_stats_msg(vfe32_ctrl,
+			vfe32_ctrl->bhistStatsControl.bufToRender,
+			statsSkinNum);
+	} else{
+		vfe32_ctrl->bhistStatsControl.droppedStatsFrameCount++;
+		CDBG("%s: droppedStatsFrameCount = %d", __func__,
+			vfe32_ctrl->bhistStatsControl.droppedStatsFrameCount);
 	}
 }
 
@@ -4026,23 +4414,28 @@
 	unsigned long flags;
 	int32_t process_stats = false;
 	uint32_t addr;
+	uint32_t stats_type;
 
 	CDBG("%s, stats = 0x%x\n", __func__, status_bits);
 	spin_lock_irqsave(&vfe32_ctrl->stats_bufq_lock, flags);
-	if (status_bits & VFE_IRQ_STATUS0_STATS_AEC) {
+	stats_type =
+		(!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AEC
+			: MSM_STATS_TYPE_BG;
+
+	if (status_bits & VFE_IRQ_STATUS0_STATS_AEC_BG) {
 		addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
-				MSM_STATS_TYPE_AEC);
+				stats_type);
 		if (addr) {
-			vfe32_ctrl->aecStatsControl.bufToRender =
+			vfe32_ctrl->aecbgStatsControl.bufToRender =
 				vfe32_process_stats_irq_common(
 				vfe32_ctrl, statsAeNum,	addr);
 			process_stats = true;
 		} else{
-			vfe32_ctrl->aecStatsControl.bufToRender = 0;
-			vfe32_ctrl->aecStatsControl.droppedStatsFrameCount++;
+			vfe32_ctrl->aecbgStatsControl.bufToRender = 0;
+			vfe32_ctrl->aecbgStatsControl.droppedStatsFrameCount++;
 		}
 	} else {
-		vfe32_ctrl->aecStatsControl.bufToRender = 0;
+		vfe32_ctrl->aecbgStatsControl.bufToRender = 0;
 	}
 
 	if (status_bits & VFE_IRQ_STATUS0_STATS_AWB) {
@@ -4062,21 +4455,24 @@
 		vfe32_ctrl->awbStatsControl.bufToRender = 0;
 	}
 
-	if (status_bits & VFE_IRQ_STATUS0_STATS_AF) {
+	stats_type =
+		(!vfe32_use_bayer_stats(vfe32_ctrl)) ? MSM_STATS_TYPE_AF
+			: MSM_STATS_TYPE_BF;
+	if (status_bits & VFE_IRQ_STATUS0_STATS_AF_BF) {
 		addr = (uint32_t)vfe32_stats_dqbuf(vfe32_ctrl,
-					MSM_STATS_TYPE_AF);
+					stats_type);
 		if (addr) {
-			vfe32_ctrl->afStatsControl.bufToRender =
+			vfe32_ctrl->afbfStatsControl.bufToRender =
 				vfe32_process_stats_irq_common(
 				vfe32_ctrl, statsAfNum,
 				addr);
 			process_stats = true;
 		} else {
-			vfe32_ctrl->afStatsControl.bufToRender = 0;
-			vfe32_ctrl->afStatsControl.droppedStatsFrameCount++;
+			vfe32_ctrl->afbfStatsControl.bufToRender = 0;
+			vfe32_ctrl->afbfStatsControl.droppedStatsFrameCount++;
 		}
 	} else {
-		vfe32_ctrl->afStatsControl.bufToRender = 0;
+		vfe32_ctrl->afbfStatsControl.bufToRender = 0;
 	}
 
 	if (status_bits & VFE_IRQ_STATUS0_STATS_IHIST) {
@@ -4182,17 +4578,21 @@
 		CDBG("irq	resetAckIrq\n");
 		vfe32_process_reset_irq(vfe32_ctrl);
 		break;
-	case VFE_IRQ_STATUS0_STATS_AEC:
+	case VFE_IRQ_STATUS0_STATS_AEC_BG:
 		CDBG("Stats AEC irq occured.\n");
-		vfe32_process_stats_ae_irq(vfe32_ctrl);
+		vfe32_process_stats_ae_bg_irq(vfe32_ctrl);
 		break;
 	case VFE_IRQ_STATUS0_STATS_AWB:
 		CDBG("Stats AWB irq occured.\n");
 		vfe32_process_stats_awb_irq(vfe32_ctrl);
 		break;
-	case VFE_IRQ_STATUS0_STATS_AF:
+	case VFE_IRQ_STATUS0_STATS_AF_BF:
 		CDBG("Stats AF irq occured.\n");
-		vfe32_process_stats_af_irq(vfe32_ctrl);
+		vfe32_process_stats_af_bf_irq(vfe32_ctrl);
+		break;
+	case VFE_IRQ_STATUS0_STATS_SK_BHIST:
+		CDBG("Stats BHIST irq occured.\n");
+		vfe32_process_stats_bhist_irq(vfe32_ctrl);
 		break;
 	case VFE_IRQ_STATUS0_STATS_IHIST:
 		CDBG("Stats IHIST irq occured.\n");
@@ -4261,11 +4661,11 @@
 		} else {
 			stat_interrupt =
 				(qcmd->vfeInterruptStatus0 &
-					VFE_IRQ_STATUS0_STATS_AEC) |
+					VFE_IRQ_STATUS0_STATS_AEC_BG) |
 				(qcmd->vfeInterruptStatus0 &
 					VFE_IRQ_STATUS0_STATS_AWB) |
 				(qcmd->vfeInterruptStatus0 &
-					VFE_IRQ_STATUS0_STATS_AF) |
+					VFE_IRQ_STATUS0_STATS_AF_BF) |
 				(qcmd->vfeInterruptStatus0 &
 					VFE_IRQ_STATUS0_STATS_IHIST) |
 				(qcmd->vfeInterruptStatus0 &
@@ -4333,10 +4733,10 @@
 			} else {
 				/* process individual stats interrupt. */
 				if (qcmd->vfeInterruptStatus0 &
-						VFE_IRQ_STATUS0_STATS_AEC)
+						VFE_IRQ_STATUS0_STATS_AEC_BG)
 					v4l2_subdev_notify(&axi_ctrl->subdev,
 					NOTIFY_VFE_IRQ,
-					(void *)VFE_IRQ_STATUS0_STATS_AEC);
+					(void *)VFE_IRQ_STATUS0_STATS_AEC_BG);
 
 				if (qcmd->vfeInterruptStatus0 &
 						VFE_IRQ_STATUS0_STATS_AWB)
@@ -4345,10 +4745,15 @@
 					(void *)VFE_IRQ_STATUS0_STATS_AWB);
 
 				if (qcmd->vfeInterruptStatus0 &
-						VFE_IRQ_STATUS0_STATS_AF)
+						VFE_IRQ_STATUS0_STATS_AF_BF)
 					v4l2_subdev_notify(&axi_ctrl->subdev,
 					NOTIFY_VFE_IRQ,
-					(void *)VFE_IRQ_STATUS0_STATS_AF);
+					(void *)VFE_IRQ_STATUS0_STATS_AF_BF);
+				if (qcmd->vfeInterruptStatus0 &
+						VFE_IRQ_STATUS0_STATS_SK_BHIST)
+					v4l2_subdev_notify(&axi_ctrl->subdev,
+					NOTIFY_VFE_IRQ,
+					(void *)VFE_IRQ_STATUS0_STATS_SK_BHIST);
 
 				if (qcmd->vfeInterruptStatus0 &
 						VFE_IRQ_STATUS0_STATS_IHIST)
@@ -4521,6 +4926,22 @@
 			vfe_ctrl->stats_ops.client);
 	}
 	break;
+	case VFE_CMD_STATS_UNREGBUF:
+	{
+		struct msm_stats_reqbuf *req_buf = NULL;
+		req_buf = (struct msm_stats_reqbuf *)cmd->value;
+		if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+			/* error: length does not match the expected struct size */
+			pr_err("%s: stats reqbuf input size = %d, "
+				"struct size = %zu, mismatch\n",
+				__func__, cmd->length,
+				sizeof(struct msm_stats_reqbuf));
+			rc = -EINVAL;
+			goto end;
+		}
+		rc = vfe32_stats_unregbuf(vfe_ctrl, req_buf);
+	}
+	break;
 	default:
 		rc = -1;
 		pr_err("%s: cmd_type %d not supported", __func__,
@@ -4570,27 +4991,31 @@
 	case VFE_CMD_STATS_REQBUF:
 	case VFE_CMD_STATS_ENQUEUEBUF:
 	case VFE_CMD_STATS_FLUSH_BUFQ:
+	case VFE_CMD_STATS_UNREGBUF:
 		/* for easy porting put in one envelope */
 		rc = vfe_stats_bufq_sub_ioctl(vfe32_ctrl,
 				cmd, vfe_params->data);
 		return rc;
 	default:
 		if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
-			cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
-			cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
-			cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
-			cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
-			cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
-			cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
-			cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
-			cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE) {
-				if (copy_from_user(&vfecmd,
+		cmd->cmd_type != CMD_CONFIG_PONG_ADDR &&
+		cmd->cmd_type != CMD_CONFIG_FREE_BUF_ADDR &&
+		cmd->cmd_type != CMD_STATS_AEC_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_AWB_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_IHIST_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_RS_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_CS_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_AF_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_BG_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_BF_BUF_RELEASE &&
+		cmd->cmd_type != CMD_STATS_BHIST_BUF_RELEASE) {
+			if (copy_from_user(&vfecmd,
 					(void __user *)(cmd->value),
 					sizeof(vfecmd))) {
-						pr_err("%s %d: copy_from_user failed\n",
-							__func__, __LINE__);
-					return -EFAULT;
-				}
+				pr_err("%s %d: copy_from_user failed\n",
+					__func__, __LINE__);
+				return -EFAULT;
+			}
 		} else {
 			/* here either stats release or frame release. */
 			if (cmd->cmd_type != CMD_CONFIG_PING_ADDR &&
@@ -4612,6 +5037,25 @@
 				sack->nextStatsBuf = *(uint32_t *)data;
 			}
 		}
+	}
+
+	CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
+
+	if ((cmd->cmd_type == CMD_STATS_AF_ENABLE)    ||
+		(cmd->cmd_type == CMD_STATS_AWB_ENABLE)   ||
+		(cmd->cmd_type == CMD_STATS_IHIST_ENABLE) ||
+		(cmd->cmd_type == CMD_STATS_RS_ENABLE)    ||
+		(cmd->cmd_type == CMD_STATS_CS_ENABLE)    ||
+		(cmd->cmd_type == CMD_STATS_AEC_ENABLE)   ||
+		(cmd->cmd_type == CMD_STATS_BG_ENABLE)    ||
+		(cmd->cmd_type == CMD_STATS_BF_ENABLE)    ||
+		(cmd->cmd_type == CMD_STATS_BHIST_ENABLE)) {
+		struct axidata *axid;
+		axid = data;
+		if (!axid) {
+			rc = -EFAULT;
+			goto vfe32_config_done;
+		}
 		CDBG("%s: cmdType = %d\n", __func__, cmd->cmd_type);
 
 		if ((cmd->cmd_type == CMD_STATS_AF_ENABLE)    ||
@@ -4625,38 +5069,56 @@
 				goto vfe32_config_done;
 		}
 		switch (cmd->cmd_type) {
-		case CMD_GENERAL:
-			rc = vfe32_proc_general(pmctl, &vfecmd, vfe32_ctrl);
-		break;
-		case CMD_CONFIG_PING_ADDR: {
-			int path = *((int *)cmd->value);
-			struct vfe32_output_ch *outch =
-				vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
-			outch->ping = *((struct msm_free_buf *)data);
+		case CMD_STATS_AEC_ENABLE:
+		case CMD_STATS_BG_ENABLE:
+		case CMD_STATS_BF_ENABLE:
+		case CMD_STATS_BHIST_ENABLE:
+		case CMD_STATS_AWB_ENABLE:
+		case CMD_STATS_IHIST_ENABLE:
+		case CMD_STATS_RS_ENABLE:
+		case CMD_STATS_CS_ENABLE:
+		default:
+			pr_err("%s Unsupported cmd type %d",
+				__func__, cmd->cmd_type);
+			break;
 		}
-		break;
-		case CMD_CONFIG_PONG_ADDR: {
-			int path = *((int *)cmd->value);
-			struct vfe32_output_ch *outch =
-				vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
-			outch->pong = *((struct msm_free_buf *)data);
-		}
+		goto vfe32_config_done;
+	}
+	switch (cmd->cmd_type) {
+	case CMD_GENERAL:
+		rc = vfe32_proc_general(pmctl, &vfecmd, vfe32_ctrl);
+	break;
+	case CMD_CONFIG_PING_ADDR: {
+		int path = *((int *)cmd->value);
+		struct vfe32_output_ch *outch =
+			vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+		outch->ping = *((struct msm_free_buf *)data);
+	}
+	break;
+
+	case CMD_CONFIG_PONG_ADDR: {
+		int path = *((int *)cmd->value);
+		struct vfe32_output_ch *outch =
+			vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+		outch->pong = *((struct msm_free_buf *)data);
+	}
+	break;
+
+	case CMD_CONFIG_FREE_BUF_ADDR: {
+		int path = *((int *)cmd->value);
+		struct vfe32_output_ch *outch =
+			vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
+		outch->free_buf = *((struct msm_free_buf *)data);
+	}
+	break;
+
+	case CMD_SNAP_BUF_RELEASE:
 		break;
 
-		case CMD_CONFIG_FREE_BUF_ADDR: {
-			int path = *((int *)cmd->value);
-			struct vfe32_output_ch *outch =
-				vfe32_get_ch(path, vfe32_ctrl->share_ctrl);
-			outch->free_buf = *((struct msm_free_buf *)data);
-		}
-		break;
-		case CMD_SNAP_BUF_RELEASE:
-		break;
-		default:
-			pr_err("%s Unsupported AXI configuration %x ", __func__,
-				cmd->cmd_type);
-		break;
-		}
+	default:
+		pr_err("%s Unsupported AXI configuration %x ", __func__,
+			cmd->cmd_type);
+	break;
 	}
 vfe32_config_done:
 	kfree(scfg);
@@ -5424,6 +5886,8 @@
 		axi32_do_tasklet, (unsigned long)axi_ctrl);
 
 	vfe32_ctrl->pdev = pdev;
+	/* disable bayer stats by default */
+	vfe32_ctrl->ver_num.main = 0;
 	return 0;
 
 vfe32_no_resource:
diff --git a/drivers/media/video/msm/msm_vfe32.h b/drivers/media/video/msm/vfe/msm_vfe32.h
similarity index 93%
rename from drivers/media/video/msm/msm_vfe32.h
rename to drivers/media/video/msm/vfe/msm_vfe32.h
index 9336cfb..d3575d7 100644
--- a/drivers/media/video/msm/msm_vfe32.h
+++ b/drivers/media/video/msm/vfe/msm_vfe32.h
@@ -104,12 +104,13 @@
 #define VFE_IRQ_STATUS1_RESET_AXI_HALT_ACK_MASK   0x00800000
 #define VFE_IRQ_STATUS0_STATS_COMPOSIT_MASK       0x01000000
 
-#define VFE_IRQ_STATUS0_STATS_AEC     0x2000  /* bit 13 */
-#define VFE_IRQ_STATUS0_STATS_AF      0x4000  /* bit 14 */
-#define VFE_IRQ_STATUS0_STATS_AWB     0x8000  /* bit 15 */
-#define VFE_IRQ_STATUS0_STATS_RS      0x10000  /* bit 16 */
-#define VFE_IRQ_STATUS0_STATS_CS      0x20000  /* bit 17 */
-#define VFE_IRQ_STATUS0_STATS_IHIST   0x40000  /* bit 18 */
+#define VFE_IRQ_STATUS0_STATS_AEC_BG    0x2000  /* bit 13 */
+#define VFE_IRQ_STATUS0_STATS_AF_BF     0x4000  /* bit 14 */
+#define VFE_IRQ_STATUS0_STATS_AWB       0x8000  /* bit 15 */
+#define VFE_IRQ_STATUS0_STATS_RS        0x10000  /* bit 16 */
+#define VFE_IRQ_STATUS0_STATS_CS        0x20000  /* bit 17 */
+#define VFE_IRQ_STATUS0_STATS_IHIST     0x40000  /* bit 18 */
+#define VFE_IRQ_STATUS0_STATS_SK_BHIST  0x80000 /* bit 19 */
 
 #define VFE_IRQ_STATUS0_SYNC_TIMER0   0x2000000  /* bit 25 */
 #define VFE_IRQ_STATUS0_SYNC_TIMER1   0x4000000  /* bit 26 */
@@ -174,8 +175,13 @@
 #define RS_CS_ENABLE_MASK 0x00000300   /* bit 8,9  */
 #define CLF_ENABLE_MASK 0x00002000     /* bit 13 */
 #define IHIST_ENABLE_MASK 0x00010000   /* bit 16 */
+#define SKIN_BHIST_ENABLE_MASK 0x00080000 /* bit 19 */
 #define STATS_ENABLE_MASK 0x000903E0   /* bit 19,16,9,8,7,6,5*/
 
+#define STATS_BG_ENABLE_MASK     0x00000002 /* bit 1 */
+#define STATS_BF_ENABLE_MASK     0x00000004 /* bit 2 */
+#define STATS_BHIST_ENABLE_MASK  0x00000008 /* bit 3 */
+
 #define VFE_REG_UPDATE_TRIGGER           1
 #define VFE_PM_BUF_MAX_CNT_MASK          0xFF
 #define VFE_DMI_CFG_DEFAULT              0x00000100
@@ -240,12 +246,13 @@
 
 #define V32_OPERATION_CFG_LEN     44
 
-#define V32_AXI_OUT_OFF           0x00000038
+#define V32_AXI_BUS_CMD_OFF       0x00000038
+#define V32_AXI_OUT_OFF           0x0000003C
 #define V32_AXI_OUT_LEN           240
-#define V32_AXI_CH_INF_LEN        48
 #define V32_AXI_CFG_LEN           47
-#define V32_AXI_BUS_FMT_OFF    1
-#define V32_AXI_BUS_FMT_LEN    4
+#define V32_AXI_BUS_FMT_OFF       1
+#define V32_AXI_BUS_FMT_LEN       4
+#define V32_AXI_BUS_CFG_LEN       16
 
 #define V32_FRAME_SKIP_OFF        0x00000504
 #define V32_FRAME_SKIP_LEN        32
@@ -378,6 +385,15 @@
 #define V32_CLF_CHROMA_UPDATE_OFF 0x000006F0
 #define V32_CLF_CHROMA_UPDATE_LEN 8
 
+#define V32_STATS_BG_OFF 0x00000700
+#define V32_STATS_BG_LEN 12
+
+#define V32_STATS_BF_OFF 0x0000070c
+#define V32_STATS_BF_LEN 24
+
+#define V32_STATS_BHIST_OFF 0x00000724
+#define V32_STATS_BHIST_LEN 8
+
 struct vfe_cmd_hw_version {
 	uint32_t minorVersion;
 	uint32_t majorVersion;
@@ -845,12 +861,12 @@
 #define VFE_AXI_STATUS        0x000001DC
 #define VFE_BUS_STATS_PING_PONG_BASE    0x000000F4
 
-#define VFE_BUS_STATS_AEC_WR_PING_ADDR    0x000000F4
-#define VFE_BUS_STATS_AEC_WR_PONG_ADDR    0x000000F8
-#define VFE_BUS_STATS_AEC_UB_CFG          0x000000FC
-#define VFE_BUS_STATS_AF_WR_PING_ADDR     0x00000100
-#define VFE_BUS_STATS_AF_WR_PONG_ADDR     0x00000104
-#define VFE_BUS_STATS_AF_UB_CFG           0x00000108
+#define VFE_BUS_STATS_AEC_BG_WR_PING_ADDR    0x000000F4
+#define VFE_BUS_STATS_AEC_BG_WR_PONG_ADDR    0x000000F8
+#define VFE_BUS_STATS_AEC_BG_UB_CFG          0x000000FC
+#define VFE_BUS_STATS_AF_BF_WR_PING_ADDR     0x00000100
+#define VFE_BUS_STATS_AF_BF_WR_PONG_ADDR     0x00000104
+#define VFE_BUS_STATS_AF_BF_UB_CFG           0x00000108
 #define VFE_BUS_STATS_AWB_WR_PING_ADDR    0x0000010C
 #define VFE_BUS_STATS_AWB_WR_PONG_ADDR    0x00000110
 #define VFE_BUS_STATS_AWB_UB_CFG          0x00000114
@@ -864,9 +880,9 @@
 #define VFE_BUS_STATS_HIST_WR_PING_ADDR   0x00000130
 #define VFE_BUS_STATS_HIST_WR_PONG_ADDR   0x00000134
 #define VFE_BUS_STATS_HIST_UB_CFG          0x00000138
-#define VFE_BUS_STATS_SKIN_WR_PING_ADDR    0x0000013C
-#define VFE_BUS_STATS_SKIN_WR_PONG_ADDR    0x00000140
-#define VFE_BUS_STATS_SKIN_UB_CFG          0x00000144
+#define VFE_BUS_STATS_SKIN_BHIST_WR_PING_ADDR    0x0000013C
+#define VFE_BUS_STATS_SKIN_BHIST_WR_PONG_ADDR    0x00000140
+#define VFE_BUS_STATS_SKIN_BHIST_UB_CFG          0x00000144
 #define VFE_CAMIF_COMMAND               0x000001E0
 #define VFE_CAMIF_STATUS                0x00000204
 #define VFE_REG_UPDATE_CMD              0x00000260
@@ -888,6 +904,7 @@
 #define VFE_STATS_AWB_SGW_CFG           0x00000554
 #define VFE_DMI_CFG                     0x00000598
 #define VFE_DMI_ADDR                    0x0000059C
+#define VFE_DMI_DATA_HI                 0x000005A0
 #define VFE_DMI_DATA_LO                 0x000005A4
 #define VFE_BUS_IO_FORMAT_CFG           0x000006F8
 #define VFE_PIXEL_IF_CFG                0x000006FC
@@ -970,8 +987,9 @@
 	void *extdata;
 
 	int8_t start_ack_pending;
-	int8_t reset_ack_pending;
 	int8_t update_ack_pending;
+	bool is_reset_blocking;
+	struct completion reset_complete;
 	enum vfe_output_state recording_state;
 	int8_t update_linear;
 	int8_t update_rolloff;
@@ -990,12 +1008,14 @@
 	uint32_t output2Period;
 	uint32_t vfeFrameSkipCount;
 	uint32_t vfeFrameSkipPeriod;
-	struct vfe_stats_control afStatsControl;
+	struct msm_ver_num_info ver_num;
+	struct vfe_stats_control afbfStatsControl;
 	struct vfe_stats_control awbStatsControl;
-	struct vfe_stats_control aecStatsControl;
+	struct vfe_stats_control aecbgStatsControl;
 	struct vfe_stats_control ihistStatsControl;
 	struct vfe_stats_control rsStatsControl;
 	struct vfe_stats_control csStatsControl;
+	struct vfe_stats_control bhistStatsControl;
 
 	/* v4l2 subdev */
 	struct v4l2_subdev subdev;
diff --git a/drivers/media/video/msm/msm_vfe7x.c b/drivers/media/video/msm/vfe/msm_vfe7x.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x.c
rename to drivers/media/video/msm/vfe/msm_vfe7x.c
diff --git a/drivers/media/video/msm/msm_vfe7x.h b/drivers/media/video/msm/vfe/msm_vfe7x.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x.h
rename to drivers/media/video/msm/vfe/msm_vfe7x.h
diff --git a/drivers/media/video/msm/msm_vfe7x27a.c b/drivers/media/video/msm/vfe/msm_vfe7x27a.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x27a.c
rename to drivers/media/video/msm/vfe/msm_vfe7x27a.c
diff --git a/drivers/media/video/msm/msm_vfe7x27a.h b/drivers/media/video/msm/vfe/msm_vfe7x27a.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe7x27a.h
rename to drivers/media/video/msm/vfe/msm_vfe7x27a.h
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
similarity index 89%
rename from drivers/media/video/msm/msm_vfe7x27a_v4l2.c
rename to drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
index 64e0385..b6daa5f 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.c
+++ b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.c
@@ -404,6 +404,25 @@
 	return 0L;
 }
 
+static unsigned long vfe2x_stats_unregbuf(
+	struct msm_stats_reqbuf *req_buf)
+{
+	int i = 0, rc = 0;
+
+	for (i = 0; i < req_buf->num_buf; i++) {
+		rc = vfe2x_ctrl->stats_ops.buf_unprepare(
+			vfe2x_ctrl->stats_ops.stats_ctrl,
+			req_buf->stats_type, i,
+			vfe2x_ctrl->stats_ops.client);
+		if (rc < 0) {
+			pr_err("%s: unreg stats buf (type = %d) err = %d",
+				__func__, req_buf->stats_type, rc);
+			return rc;
+		}
+	}
+	return 0L;
+}
+
 static int vfe2x_stats_buf_init(enum msm_stats_enum_type type)
 {
 	unsigned long flags;
@@ -556,6 +575,22 @@
 				vfe2x_ctrl->stats_ops.client);
 	}
 	break;
+	case VFE_CMD_STATS_UNREGBUF:
+	{
+		struct msm_stats_reqbuf *req_buf = NULL;
+		req_buf = (struct msm_stats_reqbuf *)cmd->value;
+		if (sizeof(struct msm_stats_reqbuf) != cmd->length) {
+			/* error: length does not match the expected struct size */
+			pr_err("%s: stats reqbuf input size = %d, "
+				"struct size = %zu, mismatch\n",
+				__func__, cmd->length,
+				sizeof(struct msm_stats_reqbuf));
+			rc = -EINVAL;
+			goto end;
+		}
+		rc = vfe2x_stats_unregbuf(req_buf);
+	}
+	break;
 	default:
 		rc = -1;
 		pr_err("%s: cmd_type %d not supported",
@@ -659,29 +694,63 @@
 		switch (id) {
 		case MSG_SNAPSHOT:
 			msm_camio_set_perf_lvl(S_PREVIEW);
-			vfe_7x_ops(driver_data, MSG_OUTPUT_S, len, getevent);
-			if (!raw_mode)
-				vfe_7x_ops(driver_data, MSG_OUTPUT_T,
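+			/* burst capture: keep draining snapshot (and thumbnail)
+			 * frames until the requested number has been emitted */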
+			while (vfe2x_ctrl->snap.frame_cnt <
+				vfe2x_ctrl->num_snap) {
+				vfe_7x_ops(driver_data, MSG_OUTPUT_S, len,
+					getevent);
+				if (!raw_mode)
+					vfe_7x_ops(driver_data, MSG_OUTPUT_T,
 						len, getevent);
+			}
 			vfe2x_send_isp_msg(vfe2x_ctrl, MSG_ID_SNAPSHOT_DONE);
 			kfree(data);
 			return;
 		case MSG_OUTPUT_S:
 			outch = &vfe2x_ctrl->snap;
-			y_phy = outch->ping.ch_paddr[0];
-			cbcr_phy = outch->ping.ch_paddr[1];
-			CDBG("MSG_OUTPUT_S: %x %x\n",
-				(unsigned int)y_phy, (unsigned int)cbcr_phy);
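+			/* burst capture: frames 0-2 use ping, pong and free_buf;
+			 * later frames index free_buf_arr[frame_cnt - 3] */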
+			if (outch->frame_cnt == 0) {
+				y_phy = outch->ping.ch_paddr[0];
+				cbcr_phy = outch->ping.ch_paddr[1];
+			} else if (outch->frame_cnt == 1) {
+				y_phy = outch->pong.ch_paddr[0];
+				cbcr_phy = outch->pong.ch_paddr[1];
+			} else if (outch->frame_cnt == 2) {
+				y_phy = outch->free_buf.ch_paddr[0];
+				cbcr_phy = outch->free_buf.ch_paddr[1];
+			} else {
+				y_phy = outch->free_buf_arr[outch->frame_cnt
+					- 3].ch_paddr[0];
+				cbcr_phy = outch->free_buf_arr[outch->frame_cnt
+					- 3].ch_paddr[1];
+			}
+			outch->frame_cnt++;
+			CDBG("MSG_OUTPUT_S: %x %x %d\n",
+				(unsigned int)y_phy, (unsigned int)cbcr_phy,
+				outch->frame_cnt);
 			vfe_send_outmsg(&vfe2x_ctrl->subdev,
 					MSG_ID_OUTPUT_PRIMARY,
 						y_phy, cbcr_phy);
 			break;
 		case MSG_OUTPUT_T:
 			outch = &vfe2x_ctrl->thumb;
-			y_phy = outch->ping.ch_paddr[0];
-			cbcr_phy = outch->ping.ch_paddr[1];
-			CDBG("MSG_OUTPUT_T: %x %x\n",
-				(unsigned int)y_phy, (unsigned int)cbcr_phy);
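+			/* same ping/pong/free_buf/free_buf_arr rotation as the
+			 * MSG_OUTPUT_S path above */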
+			if (outch->frame_cnt == 0) {
+				y_phy = outch->ping.ch_paddr[0];
+				cbcr_phy = outch->ping.ch_paddr[1];
+			} else if (outch->frame_cnt == 1) {
+				y_phy = outch->pong.ch_paddr[0];
+				cbcr_phy = outch->pong.ch_paddr[1];
+			} else if (outch->frame_cnt == 2) {
+				y_phy = outch->free_buf.ch_paddr[0];
+				cbcr_phy = outch->free_buf.ch_paddr[1];
+			} else {
+				y_phy = outch->free_buf_arr[outch->frame_cnt
+					- 3].ch_paddr[0];
+				cbcr_phy = outch->free_buf_arr[outch->frame_cnt
+					- 3].ch_paddr[1];
+			}
+			outch->frame_cnt++;
+			CDBG("MSG_OUTPUT_T: %x %x %d\n",
+				(unsigned int)y_phy, (unsigned int)cbcr_phy,
+				outch->frame_cnt);
 			vfe_send_outmsg(&vfe2x_ctrl->subdev,
 						MSG_ID_OUTPUT_SECONDARY,
 							y_phy, cbcr_phy);
@@ -885,8 +954,9 @@
 			vfe2x_ctrl->vfeFrameId++;
 			if (vfe2x_ctrl->vfeFrameId == 0)
 				vfe2x_ctrl->vfeFrameId = 1; /* wrapped back */
-			if ((op_mode & SNAPSHOT_MASK_MODE) && !raw_mode) {
-				pr_err("Ignore SOF for snapshot\n");
+			if ((op_mode & SNAPSHOT_MASK_MODE) && !raw_mode
+				&& (vfe2x_ctrl->num_snap <= 1)) {
+				CDBG("Ignore SOF for snapshot\n");
 				kfree(data);
 				return;
 			}
@@ -990,7 +1060,76 @@
 	if (op_mode & SNAPSHOT_MASK_MODE)
 		o_mode = SNAPSHOT_MASK_MODE;
 
-	if (mode == OUTPUT_SEC) {
+	if ((o_mode == SNAPSHOT_MASK_MODE) && (vfe2x_ctrl->num_snap > 1)) {
+		CDBG("%s: BURST mode freebuf cnt %d", __func__,
+			ad->free_buf_cnt);
+		/* Burst */
+		if (mode == OUTPUT_SEC) {
+			ao->output1buffer1_y_phy = ad->ping.ch_paddr[0];
+			ao->output1buffer1_cbcr_phy = ad->ping.ch_paddr[1];
+			ao->output1buffer2_y_phy = ad->pong.ch_paddr[0];
+			ao->output1buffer2_cbcr_phy = ad->pong.ch_paddr[1];
+			ao->output1buffer3_y_phy = ad->free_buf.ch_paddr[0];
+			ao->output1buffer3_cbcr_phy = ad->free_buf.ch_paddr[1];
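+			/* remaining output buffers come from free_buf_arr when
+			 * enough free buffers were queued, else reuse pong */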
+			bptr = &ao->output1buffer4_y_phy;
+			for (cnt = 0; cnt < 5; cnt++) {
+				*bptr = (cnt < ad->free_buf_cnt-3) ?
+					ad->free_buf_arr[cnt].ch_paddr[0] :
+						ad->pong.ch_paddr[0];
+				bptr++;
+				*bptr = (cnt < ad->free_buf_cnt-3) ?
+					ad->free_buf_arr[cnt].ch_paddr[1] :
+						ad->pong.ch_paddr[1];
+				bptr++;
+			}
+			CDBG("%x %x\n", (unsigned int)ao->output1buffer1_y_phy,
+				(unsigned int)ao->output1buffer1_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output1buffer2_y_phy,
+				(unsigned int)ao->output1buffer2_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output1buffer3_y_phy,
+				(unsigned int)ao->output1buffer3_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output1buffer4_y_phy,
+				(unsigned int)ao->output1buffer4_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output1buffer5_y_phy,
+				(unsigned int)ao->output1buffer5_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output1buffer6_y_phy,
+				(unsigned int)ao->output1buffer6_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output1buffer7_y_phy,
+				(unsigned int)ao->output1buffer7_cbcr_phy);
+		} else { /*Primary*/
+			ao->output2buffer1_y_phy = ad->ping.ch_paddr[0];
+			ao->output2buffer1_cbcr_phy = ad->ping.ch_paddr[1];
+			ao->output2buffer2_y_phy = ad->pong.ch_paddr[0];
+			ao->output2buffer2_cbcr_phy = ad->pong.ch_paddr[1];
+			ao->output2buffer3_y_phy = ad->free_buf.ch_paddr[0];
+			ao->output2buffer3_cbcr_phy = ad->free_buf.ch_paddr[1];
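+			/* same free_buf_arr/pong fallback as the secondary path */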
+			bptr = &ao->output2buffer4_y_phy;
+			for (cnt = 0; cnt < 5; cnt++) {
+				*bptr = (cnt < ad->free_buf_cnt-3) ?
+					ad->free_buf_arr[cnt].ch_paddr[0] :
+						ad->pong.ch_paddr[0];
+				bptr++;
+				*bptr = (cnt < ad->free_buf_cnt-3) ?
+					ad->free_buf_arr[cnt].ch_paddr[1] :
+						ad->pong.ch_paddr[1];
+				bptr++;
+			}
+			CDBG("%x %x\n", (unsigned int)ao->output2buffer1_y_phy,
+				(unsigned int)ao->output2buffer1_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output2buffer2_y_phy,
+				(unsigned int)ao->output2buffer2_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output2buffer3_y_phy,
+				(unsigned int)ao->output2buffer3_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output2buffer4_y_phy,
+				(unsigned int)ao->output2buffer4_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output2buffer5_y_phy,
+				(unsigned int)ao->output2buffer5_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output2buffer6_y_phy,
+				(unsigned int)ao->output2buffer6_cbcr_phy);
+			CDBG("%x %x\n", (unsigned int)ao->output2buffer7_y_phy,
+				(unsigned int)ao->output2buffer7_cbcr_phy);
+		}
+	} else if (mode == OUTPUT_SEC) {
 		/* Thumbnail */
 		if (vfe2x_ctrl->zsl_mode) {
 			ao->output1buffer1_y_phy = ad->ping.ch_paddr[0];
@@ -1228,6 +1367,7 @@
 		cmd->cmd_type != CMD_VFE_BUFFER_RELEASE &&
 		cmd->cmd_type != VFE_CMD_STATS_REQBUF &&
 		cmd->cmd_type != VFE_CMD_STATS_FLUSH_BUFQ &&
+		cmd->cmd_type != VFE_CMD_STATS_UNREGBUF &&
 		cmd->cmd_type != VFE_CMD_STATS_ENQUEUEBUF) {
 		if (copy_from_user(&vfecmd,
 			   (void __user *)(cmd->value),
@@ -1239,6 +1379,7 @@
 	switch (cmd->cmd_type) {
 	case VFE_CMD_STATS_REQBUF:
 	case VFE_CMD_STATS_FLUSH_BUFQ:
+	case VFE_CMD_STATS_UNREGBUF:
 		/* for easy porting put in one envelope */
 		rc = vfe2x_stats_bufq_sub_ioctl(cmd, vfe_params->data);
 		return rc;
@@ -1311,7 +1452,20 @@
 	case CMD_CONFIG_FREE_BUF_ADDR: {
 		int path = *((int *)cmd->value);
 		struct buf_info *outch = vfe2x_get_ch(path);
-		outch->free_buf = *((struct msm_free_buf *)data);
+		if ((op_mode & SNAPSHOT_MASK_MODE) &&
+			(vfe2x_ctrl->num_snap > 1)) {
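+			/* burst snapshot: the first extra buffer goes to free_buf,
+			 * the rest are collected in free_buf_arr[] */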
+			CDBG("%s: CMD_CONFIG_FREE_BUF_ADDR Burst mode %d",
+					__func__, outch->free_buf_cnt);
+			if (outch->free_buf_cnt <= 0)
+				outch->free_buf =
+					*((struct msm_free_buf *)data);
+			else
+				outch->free_buf_arr[outch->free_buf_cnt-1] =
+					*((struct msm_free_buf *)data);
+			++outch->free_buf_cnt;
+		} else {
+			outch->free_buf = *((struct msm_free_buf *)data);
+		}
 	}
 		return 0;
 
@@ -1489,6 +1643,12 @@
 							vfecmd.length))
 				rc = -EFAULT;
 			op_mode = vfe2x_ctrl->start_cmd.mode_of_operation;
+			vfe2x_ctrl->snap.free_buf_cnt = 0;
+			vfe2x_ctrl->thumb.free_buf_cnt = 0;
+			vfe2x_ctrl->snap.frame_cnt = 0;
+			vfe2x_ctrl->thumb.frame_cnt = 0;
+			vfe2x_ctrl->num_snap =
+				vfe2x_ctrl->start_cmd.snap_number;
 			return rc;
 		}
 		if (vfecmd.id == VFE_CMD_RECONFIG_VFE) {
@@ -1626,11 +1786,11 @@
 		}
 		if (op_mode & SNAPSHOT_MASK_MODE)
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_CAPTURE,
+						VFE_MSG_CAPTURE,
 						VFE_MSG_OUTPUT_SECONDARY);
 		else
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_SECONDARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1680,11 +1840,11 @@
 		if (!vfe2x_ctrl->reconfig_vfe) {
 			if (op_mode & SNAPSHOT_MASK_MODE)
 				rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_CAPTURE,
+						VFE_MSG_CAPTURE,
 						VFE_MSG_OUTPUT_PRIMARY);
 			else
 				rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_PRIMARY);
 			if (rc < 0) {
 				pr_err("%s error configuring pingpong buffers"
@@ -1745,10 +1905,10 @@
 		}
 		if (!vfe2x_ctrl->reconfig_vfe) {
 				rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_PRIMARY);
 				rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_SECONDARY);
 			if (rc < 0) {
 				pr_err("%s error configuring pingpong buffers"
@@ -1809,11 +1969,11 @@
 		}
 		if (op_mode & SNAPSHOT_MASK_MODE)
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_CAPTURE,
+						VFE_MSG_CAPTURE,
 						VFE_MSG_OUTPUT_SECONDARY);
 		else
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_SECONDARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1822,10 +1982,21 @@
 			goto config_done;
 		}
 
-		if (!(op_mode & SNAPSHOT_MASK_MODE))
+		if (!(op_mode & SNAPSHOT_MASK_MODE)) {
 			free_buf = vfe2x_check_free_buffer(
 					VFE_MSG_OUTPUT_IRQ,
 					VFE_MSG_OUTPUT_SECONDARY);
+		} else if ((op_mode & SNAPSHOT_MASK_MODE) &&
+				(vfe2x_ctrl->num_snap > 1)) {
+			int i = 0;
+			CDBG("Burst mode AXI config SEC snap cnt %d\n",
+				vfe2x_ctrl->num_snap);
+			for (i = 0; i < vfe2x_ctrl->num_snap - 2; i++) {
+				free_buf = vfe2x_check_free_buffer(
+					VFE_MSG_OUTPUT_IRQ,
+					VFE_MSG_OUTPUT_SECONDARY);
+			}
+		}
 		header = cmds_map[vfecmd.id].vfe_id;
 		queue = cmds_map[vfecmd.id].queue;
 		if (header == -1 && queue == -1) {
@@ -1840,11 +2011,11 @@
 
 		if (op_mode & SNAPSHOT_MASK_MODE)
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_CAPTURE,
+						VFE_MSG_CAPTURE,
 						VFE_MSG_OUTPUT_PRIMARY);
 		else
 			rc = vfe2x_configure_pingpong_buffers(
-						VFE_MSG_V2X_PREVIEW,
+						VFE_MSG_PREVIEW,
 						VFE_MSG_OUTPUT_PRIMARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
@@ -1853,10 +2024,22 @@
 			goto config_done;
 		}
 
-		if (!(op_mode & SNAPSHOT_MASK_MODE))
+		if (!(op_mode & SNAPSHOT_MASK_MODE)) {
 			free_buf = vfe2x_check_free_buffer(
 					VFE_MSG_OUTPUT_IRQ,
 					VFE_MSG_OUTPUT_PRIMARY);
+		} else if ((op_mode & SNAPSHOT_MASK_MODE) &&
+				(vfe2x_ctrl->num_snap > 1)) {
+			int i = 0;
+			CDBG("Burst mode AXI config PRIM snap cnt %d\n",
+				vfe2x_ctrl->num_snap);
+			for (i = 0; i < vfe2x_ctrl->num_snap - 2; i++) {
+				free_buf = vfe2x_check_free_buffer(
+					VFE_MSG_OUTPUT_IRQ,
+					VFE_MSG_OUTPUT_PRIMARY);
+			}
+		}
+
 		if (op_mode & SNAPSHOT_MASK_MODE)
 			vfe_7x_config_axi(OUTPUT_PRIM,
 					&vfe2x_ctrl->snap, axio);
@@ -1883,7 +2066,7 @@
 		}
 		header = cmds_map[vfecmd.id].vfe_id;
 		queue = cmds_map[vfecmd.id].queue;
-		rc = vfe2x_configure_pingpong_buffers(VFE_MSG_V2X_CAPTURE,
+		rc = vfe2x_configure_pingpong_buffers(VFE_MSG_CAPTURE,
 						VFE_MSG_OUTPUT_PRIMARY);
 		if (rc < 0) {
 			pr_err("%s error configuring pingpong buffers"
diff --git a/drivers/media/video/msm/msm_vfe7x27a_v4l2.h b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h
similarity index 97%
rename from drivers/media/video/msm/msm_vfe7x27a_v4l2.h
rename to drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h
index b7d6806..39affc4 100644
--- a/drivers/media/video/msm/msm_vfe7x27a_v4l2.h
+++ b/drivers/media/video/msm/vfe/msm_vfe7x27a_v4l2.h
@@ -18,6 +18,9 @@
 #include "msm.h"
 #include "msm_vfe_stats_buf.h"
 
+/* 8 DSP buffers minus the 3 used for ping, pong and free */
+#define FREE_BUF_ARR_SIZE 5
+
 struct cmd_id_map {
 	uint32_t isp_id;
 	uint32_t vfe_id;
@@ -50,6 +53,10 @@
 	struct msm_free_buf ping;
 	struct msm_free_buf pong;
 	struct msm_free_buf free_buf;
+	/*Array for holding the free buffer if more than one*/
+	struct msm_free_buf free_buf_arr[FREE_BUF_ARR_SIZE];
+	int free_buf_cnt;
+	int frame_cnt;
 } __packed;
 
 struct prev_free_buf_info {
@@ -117,6 +124,7 @@
 	struct msm_stats_ops stats_ops;
 	unsigned long stats_we_buf_ptr[3];
 	unsigned long stats_af_buf_ptr[3];
+	int num_snap;
 } __packed;
 
 struct vfe_frame_extra {
diff --git a/drivers/media/video/msm/msm_vfe8x.c b/drivers/media/video/msm/vfe/msm_vfe8x.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x.c
rename to drivers/media/video/msm/vfe/msm_vfe8x.c
diff --git a/drivers/media/video/msm/msm_vfe8x.h b/drivers/media/video/msm/vfe/msm_vfe8x.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x.h
rename to drivers/media/video/msm/vfe/msm_vfe8x.h
diff --git a/drivers/media/video/msm/msm_vfe8x_proc.c b/drivers/media/video/msm/vfe/msm_vfe8x_proc.c
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x_proc.c
rename to drivers/media/video/msm/vfe/msm_vfe8x_proc.c
diff --git a/drivers/media/video/msm/msm_vfe8x_proc.h b/drivers/media/video/msm/vfe/msm_vfe8x_proc.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe8x_proc.h
rename to drivers/media/video/msm/vfe/msm_vfe8x_proc.h
diff --git a/drivers/media/video/msm/msm_vfe_stats_buf.c b/drivers/media/video/msm/vfe/msm_vfe_stats_buf.c
similarity index 99%
rename from drivers/media/video/msm/msm_vfe_stats_buf.c
rename to drivers/media/video/msm/vfe/msm_vfe_stats_buf.c
index 9e8f285..5fbcdb1 100644
--- a/drivers/media/video/msm/msm_vfe_stats_buf.c
+++ b/drivers/media/video/msm/vfe/msm_vfe_stats_buf.c
@@ -475,6 +475,8 @@
 	struct msm_stats_buf_info *info, struct ion_client *client)
 {
 	int rc = 0;
+	D("%s: stats type : %d, idx : %d\n", __func__,
+		info->type, info->buf_idx);
 	rc = msm_stats_buf_prepare(stats_ctrl, info, client);
 	if (rc < 0) {
 		pr_err("%s: buf_prepare failed, rc = %d", __func__, rc);
diff --git a/drivers/media/video/msm/msm_vfe_stats_buf.h b/drivers/media/video/msm/vfe/msm_vfe_stats_buf.h
similarity index 100%
rename from drivers/media/video/msm/msm_vfe_stats_buf.h
rename to drivers/media/video/msm/vfe/msm_vfe_stats_buf.h
diff --git a/drivers/media/video/msm/wfd/enc-subdev.c b/drivers/media/video/msm/wfd/enc-subdev.c
index a5f2634..8b83a98 100644
--- a/drivers/media/video/msm/wfd/enc-subdev.c
+++ b/drivers/media/video/msm/wfd/enc-subdev.c
@@ -24,6 +24,8 @@
 #define VID_ENC_MAX_ENCODER_CLIENTS 1
 #define MAX_NUM_CTRLS 20
 
+static long venc_fill_outbuf(struct v4l2_subdev *sd, void *arg);
+
 struct venc_inst {
 	struct video_client_ctx venc_client;
 	void *cbdata;
@@ -34,6 +36,8 @@
 	u32 width;
 	u32 height;
 	int secure;
+	struct mem_region unqueued_op_bufs;
+	bool streaming;
 };
 
 struct venc {
@@ -287,8 +291,10 @@
 	init_waitqueue_head(&client_ctx->msg_wait);
 	inst->op_buffer_done = vmops->op_buffer_done;
 	inst->ip_buffer_done = vmops->ip_buffer_done;
+	INIT_LIST_HEAD(&inst->unqueued_op_bufs.list);
 	inst->cbdata = vmops->cbdata;
 	inst->secure = vmops->secure;
+	inst->streaming = false;
 	if (vmops->secure) {
 		WFD_MSG_ERR("OPENING SECURE SESSION\n");
 		flags |= VCD_CP_SESSION;
@@ -423,11 +429,13 @@
 {
 	struct venc_inst *inst = sd->dev_priv;
 	struct video_client_ctx *client_ctx = &inst->venc_client;
+	struct mem_region *curr = NULL, *temp = NULL;
 	int rc;
 	if (!client_ctx) {
 		WFD_MSG_ERR("Client context is NULL");
 		return -EINVAL;
 	}
+
 	rc = vcd_encode_start(client_ctx->vcd_handle);
 	if (rc) {
 		WFD_MSG_ERR("vcd_encode_start failed, rc = %d\n", rc);
@@ -437,6 +445,15 @@
 	if (client_ctx->event_status)
 		WFD_MSG_ERR("callback for vcd_encode_start returned error: %u",
 				client_ctx->event_status);
+
+	inst->streaming = true;
+	/* Push any buffers that we have held back */
+	list_for_each_entry_safe(curr, temp,
+			&inst->unqueued_op_bufs.list, list) {
+		venc_fill_outbuf(sd, curr);
+		list_del(&curr->list);
+		kfree(curr);
+	}
 err:
 	return rc;
 }
@@ -445,6 +462,7 @@
 {
 	struct venc_inst *inst = sd->dev_priv;
 	struct video_client_ctx *client_ctx = &inst->venc_client;
+	struct mem_region *curr = NULL, *temp = NULL;
 	int rc;
 	if (!client_ctx) {
 		WFD_MSG_ERR("Client context is NULL");
@@ -452,6 +470,15 @@
 	}
 	rc = vcd_stop(client_ctx->vcd_handle);
 	wait_for_completion(&client_ctx->event);
+	inst->streaming = false;
+	/* Drop whatever frames we haven't queued */
+	list_for_each_entry_safe(curr, temp,
+			&inst->unqueued_op_bufs.list, list) {
+		inst->op_buffer_done(inst->cbdata, 0,
+				(struct vb2_buffer *)curr->cookie);
+		list_del(&curr->list);
+		kfree(curr);
+	}
 	return rc;
 }
 
@@ -1841,22 +1868,29 @@
 	struct file *file;
 	s32 buffer_index = -1;
 
-	user_vaddr = mregion->cookie;
-	rc = vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT,
-			true, &user_vaddr,
-			&kernel_vaddr, &phy_addr, &pmem_fd, &file,
-			&buffer_index);
-	if (!rc) {
-		WFD_MSG_ERR("Address lookup failed\n");
-		goto err;
-	}
-	vcd_frame.virtual = (u8 *) kernel_vaddr;
-	vcd_frame.frm_clnt_data = mregion->cookie;
-	vcd_frame.alloc_len = mregion->size;
+	if (inst->streaming) {
+		user_vaddr = mregion->cookie;
+		rc = vidc_lookup_addr_table(client_ctx, BUFFER_TYPE_OUTPUT,
+				true, &user_vaddr,
+				&kernel_vaddr, &phy_addr, &pmem_fd, &file,
+				&buffer_index);
+		if (!rc) {
+			WFD_MSG_ERR("Address lookup failed\n");
+			goto err;
+		}
+		vcd_frame.virtual = (u8 *) kernel_vaddr;
+		vcd_frame.frm_clnt_data = mregion->cookie;
+		vcd_frame.alloc_len = mregion->size;
 
-	rc = vcd_fill_output_buffer(client_ctx->vcd_handle,	&vcd_frame);
-	if (rc)
-		WFD_MSG_ERR("Failed to fill output buffer on encoder");
+		rc = vcd_fill_output_buffer(client_ctx->vcd_handle, &vcd_frame);
+		if (rc)
+			WFD_MSG_ERR("Failed to fill output buffer on encoder");
+	} else {
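+		/* encoder is not streaming yet: hold the buffer on
+		 * unqueued_op_bufs and queue it once streaming starts */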
+		struct mem_region *temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+		if (!temp) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		*temp = *mregion;
+		INIT_LIST_HEAD(&temp->list);
+		list_add_tail(&temp->list, &inst->unqueued_op_bufs.list);
+	}
 err:
 	return rc;
 }
diff --git a/drivers/media/video/msm/wfd/mdp-subdev.c b/drivers/media/video/msm/wfd/mdp-subdev.c
index a6d244f..886b0ba 100644
--- a/drivers/media/video/msm/wfd/mdp-subdev.c
+++ b/drivers/media/video/msm/wfd/mdp-subdev.c
@@ -45,6 +45,13 @@
 		goto exit;
 	}
 
+	/*Tell HDMI daemon to open fb2*/
+	rc = kobject_uevent(&fbi->dev->kobj, KOBJ_ADD);
+	if (rc) {
+		WFD_MSG_ERR("Failed to add to kobj\n");
+		goto exit;
+	}
+
 	msm_fb_writeback_init(fbi);
 	inst->mdp = fbi;
 	*cookie = inst;
@@ -71,6 +78,9 @@
 			rc = -ENODEV;
 			goto exit;
 		}
+		rc = kobject_uevent(&fbi->dev->kobj, KOBJ_ONLINE);
+		if (rc)
+			WFD_MSG_ERR("Failed to send ONLINE event\n");
 	}
 exit:
 	return rc;
@@ -79,12 +89,19 @@
 {
 	struct mdp_instance *inst = arg;
 	int rc = 0;
+	struct fb_info *fbi = NULL;
 	if (inst) {
 		rc = msm_fb_writeback_stop(inst->mdp);
 		if (rc) {
 			WFD_MSG_ERR("Failed to stop writeback mode\n");
 			return rc;
 		}
+		fbi = (struct fb_info *)inst->mdp;
+		rc = kobject_uevent(&fbi->dev->kobj, KOBJ_OFFLINE);
+		if (rc) {
+			WFD_MSG_ERR("Failed to send offline event\n");
+			return -EIO;
+		}
 	}
 	return 0;
 }
diff --git a/drivers/media/video/msm_vidc/msm_v4l2_vidc.c b/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
index 817caf5..cf1ebbb 100644
--- a/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
+++ b/drivers/media/video/msm_vidc/msm_v4l2_vidc.c
@@ -704,6 +704,7 @@
 	struct resource *res;
 	int i = 0;
 	int rc = 0;
+	struct on_chip_mem *ocmem;
 	if (!core)
 		return -EINVAL;
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -753,6 +754,14 @@
 		pr_err("Failed to register iommu domains: %d\n", rc);
 		goto fail_register_domains;
 	}
+	ocmem = &core->resources.ocmem;
+	ocmem->vidc_ocmem_nb.notifier_call = msm_vidc_ocmem_notify_handler;
+	ocmem->handle =
+		ocmem_notifier_register(OCMEM_VIDEO, &ocmem->vidc_ocmem_nb);
+	if (!ocmem->handle) {
+		pr_warn("Failed to register OCMEM notifier.\n");
+		pr_warn("Performance will be impacted\n");
+	}
 	return rc;
 fail_register_domains:
 	msm_bus_scale_unregister_client(
@@ -861,6 +870,9 @@
 	video_unregister_device(&core->vdev[MSM_VIDC_ENCODER].vdev);
 	video_unregister_device(&core->vdev[MSM_VIDC_DECODER].vdev);
 	v4l2_device_unregister(&core->v4l2_dev);
+	if (core->resources.ocmem.handle)
+		ocmem_notifier_unregister(core->resources.ocmem.handle,
+				&core->resources.ocmem.vidc_ocmem_nb);
 	kfree(core);
 	return rc;
 }
diff --git a/drivers/media/video/msm_vidc/msm_vidc_common.c b/drivers/media/video/msm_vidc/msm_vidc_common.c
index fa9608d..6835467 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_common.c
+++ b/drivers/media/video/msm_vidc/msm_vidc_common.c
@@ -42,6 +42,11 @@
 	__height * __width * __fps; \
 })
 
+#define GET_NUM_MBS(__h, __w) ({\
+	u32 __mbs = (__h >> 4) * (__w >> 4);\
+	__mbs;\
+})
+
 /*While adding entries to this array make sure
  * they are in descending order.
  * Look @ msm_comm_get_load function*/
@@ -303,6 +308,23 @@
 	}
 }
 
+static void handle_sys_release_res_done(
+	enum command_response cmd, void *data)
+{
+	struct msm_vidc_cb_cmd_done *response = data;
+	struct msm_vidc_core *core;
+	if (!response) {
+		pr_err("Failed to get valid response for sys init\n");
+		return;
+	}
+	core = get_vidc_core(response->device_id);
+	if (!core) {
+		pr_err("Wrong device_id received\n");
+		return;
+	}
+	complete(&core->completions[SYS_MSG_INDEX(cmd)]);
+}
+
 static inline void change_inst_state(struct msm_vidc_inst *inst,
 	enum instance_state state)
 {
@@ -591,6 +613,9 @@
 	case SYS_INIT_DONE:
 		handle_sys_init_done(cmd, data);
 		break;
+	case RELEASE_RESOURCE_DONE:
+		handle_sys_release_res_done(cmd, data);
+		break;
 	case SESSION_INIT_DONE:
 		handle_session_init_done(cmd, data);
 		break;
@@ -753,6 +778,148 @@
 	}
 }
 
+static inline unsigned long get_ocmem_requirement(u32 height, u32 width)
+{
+	int num_mbs = 0;
+	num_mbs = GET_NUM_MBS(height, width);
+	/* TODO: This should be changed once the numbers are
+	 * available from firmware */
+	return 512 * 1024;
+}
+
+static int msm_comm_set_ocmem(struct msm_vidc_core *core,
+	struct ocmem_buf *ocmem)
+{
+	struct vidc_resource_hdr rhdr;
+	int rc = 0;
+	if (!core || !ocmem) {
+		pr_err("Invalid params, core:%p, ocmem: %p\n",
+			core, ocmem);
+		return -EINVAL;
+	}
+	rhdr.resource_id = VIDC_RESOURCE_OCMEM;
+	rhdr.resource_handle = (u32) &core->resources.ocmem;
+	rhdr.size = ocmem->len;
+	rc = vidc_hal_core_set_resource(core->device, &rhdr, ocmem);
+	if (rc) {
+		pr_err("Failed to set OCMEM on driver\n");
+		goto ocmem_set_failed;
+	}
+	pr_debug("OCMEM set, addr = %lx, size: %ld\n",
+		ocmem->addr, ocmem->len);
+ocmem_set_failed:
+	return rc;
+}
+
+static int msm_comm_unset_ocmem(struct msm_vidc_core *core)
+{
+	struct vidc_resource_hdr rhdr;
+	int rc = 0;
+	if (!core || !core->resources.ocmem.buf) {
+		pr_err("Invalid params, core:%p\n", core);
+		return -EINVAL;
+	}
+	rhdr.resource_id = VIDC_RESOURCE_OCMEM;
+	rhdr.resource_handle = (u32) &core->resources.ocmem;
+	init_completion(
+		&core->completions[SYS_MSG_INDEX(RELEASE_RESOURCE_DONE)]);
+	rc = vidc_hal_core_release_resource(core->device, &rhdr);
+	if (rc) {
+		pr_err("Failed to release OCMEM on driver\n");
+		goto release_ocmem_failed;
+	}
+	rc = wait_for_completion_timeout(
+		&core->completions[SYS_MSG_INDEX(RELEASE_RESOURCE_DONE)],
+		msecs_to_jiffies(HW_RESPONSE_TIMEOUT));
+	if (!rc) {
+		pr_err("Wait interrupted or timeout: %d\n", rc);
+		rc = -EIO;
+		goto release_ocmem_failed;
+	}
+release_ocmem_failed:
+	return rc;
+}
+
+static int msm_comm_alloc_ocmem(struct msm_vidc_core *core,
+		unsigned long size)
+{
+	int rc = 0;
+	unsigned long flags;
+	struct ocmem_buf *ocmem_buffer;
+	if (!core || !size) {
+		pr_err("Invalid param, core: %p, size: %lu\n", core, size);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&core->lock, flags);
+	ocmem_buffer = core->resources.ocmem.buf;
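+	/* reuse the current OCMEM allocation when it is already large enough;
+	 * otherwise allocate a new buffer and hand it to the firmware */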
+	if (!ocmem_buffer ||
+		ocmem_buffer->len < size) {
+		ocmem_buffer = ocmem_allocate_nb(OCMEM_VIDEO, size);
+		if (IS_ERR_OR_NULL(ocmem_buffer)) {
+			pr_err("ocmem_allocate_nb failed: %d\n",
+				(u32) ocmem_buffer);
+			rc = -ENOMEM;
+			goto ocmem_set_failed;
+		}
+		core->resources.ocmem.buf = ocmem_buffer;
+		rc = msm_comm_set_ocmem(core, ocmem_buffer);
+		if (rc) {
+			pr_err("Failed to set ocmem: %d\n", rc);
+			goto ocmem_set_failed;
+		}
+	} else
+		pr_debug("OCMEM is enough. reqd: %lu, available: %lu\n",
+			size, ocmem_buffer->len);
+
+ocmem_set_failed:
+	spin_unlock_irqrestore(&core->lock, flags);
+	return rc;
+}
+
+static int msm_comm_free_ocmem(struct msm_vidc_core *core)
+{
+	int rc = 0;
+	unsigned long flags;
+	spin_lock_irqsave(&core->lock, flags);
+	if (core->resources.ocmem.buf) {
+		rc = ocmem_free(OCMEM_VIDEO, core->resources.ocmem.buf);
+		if (rc)
+			pr_err("Failed to free ocmem\n");
+	}
+	core->resources.ocmem.buf = NULL;
+	spin_unlock_irqrestore(&core->lock, flags);
+	return rc;
+}
+
+int msm_vidc_ocmem_notify_handler(struct notifier_block *this,
+		unsigned long event, void *data)
+{
+	struct ocmem_buf *buff = data;
+	struct msm_vidc_core *core;
+	struct msm_vidc_resources *resources;
+	struct on_chip_mem *ocmem;
+	int rc = NOTIFY_DONE;
+	if (event == OCMEM_ALLOC_GROW) {
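+		/* the OCMEM region grew asynchronously; hand the new buffer
+		 * back to the firmware via a set-resource command */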
+		ocmem = container_of(this, struct on_chip_mem, vidc_ocmem_nb);
+		if (!ocmem) {
+			pr_err("Wrong handler passed\n");
+			rc = NOTIFY_BAD;
+			goto bad_notifier;
+		}
+		resources = container_of(ocmem,
+			struct msm_vidc_resources, ocmem);
+		core = container_of(resources,
+			struct msm_vidc_core, resources);
+		if (msm_comm_set_ocmem(core, buff)) {
+			pr_err("Failed to set ocmem: %d\n", rc);
+			goto ocmem_set_failed;
+		}
+		rc = NOTIFY_OK;
+	}
+ocmem_set_failed:
+bad_notifier:
+	return rc;
+}
+
 static int msm_comm_init_core_done(struct msm_vidc_inst *inst)
 {
 	struct msm_vidc_core *core = inst->core;
@@ -835,7 +1002,8 @@
 		goto core_already_uninited;
 	}
 	if (list_empty(&core->instances)) {
-		pr_debug("Calling vidc_hal_core_release\n");
+		msm_comm_unset_ocmem(core);
+		msm_comm_free_ocmem(core);
 		rc = vidc_hal_core_release(core->device);
 		if (rc) {
 			pr_err("Failed to release core, id = %d\n", core->id);
@@ -953,10 +1121,16 @@
 	struct msm_vidc_inst *inst)
 {
 	int rc = 0;
+	u32 ocmem_sz = 0;
 	if (IS_ALREADY_IN_STATE(flipped_state, MSM_VIDC_LOAD_RESOURCES)) {
 		pr_err("inst: %p is already in state: %d\n", inst, inst->state);
 		goto exit;
 	}
+	ocmem_sz = get_ocmem_requirement(inst->height, inst->width);
+	rc = msm_comm_alloc_ocmem(inst->core, ocmem_sz);
+	if (rc)
+		pr_warn("Failed to allocate OCMEM. Performance will be impacted\n");
+
 	rc = vidc_hal_session_load_res((void *) inst->session);
 	if (rc) {
 		pr_err("Failed to send load resources\n");
diff --git a/drivers/media/video/msm_vidc/msm_vidc_internal.h b/drivers/media/video/msm_vidc/msm_vidc_internal.h
index 58d7290..992f39c 100644
--- a/drivers/media/video/msm_vidc/msm_vidc_internal.h
+++ b/drivers/media/video/msm_vidc/msm_vidc_internal.h
@@ -21,6 +21,7 @@
 #include <linux/clk.h>
 #include <mach/msm_bus.h>
 #include <mach/msm_bus_board.h>
+#include <mach/ocmem.h>
 #include <media/v4l2-dev.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
@@ -153,11 +154,18 @@
 	u32 ocmem_handle;
 };
 
+struct on_chip_mem {
+	struct ocmem_buf *buf;
+	struct notifier_block vidc_ocmem_nb;
+	void *handle;
+};
+
 struct msm_vidc_resources {
 	struct msm_vidc_fw fw;
 	struct iommu_info io_map[MAX_MAP];
 	struct core_clock clock[VCODEC_MAX_CLKS];
 	struct vidc_bus_info bus_info;
+	struct on_chip_mem ocmem;
 };
 
 struct session_prop {
@@ -227,4 +235,7 @@
 };
 
 void handle_cmd_response(enum command_response cmd, void *data);
+int msm_vidc_ocmem_notify_handler(struct notifier_block *this,
+		unsigned long event, void *data);
+
 #endif
diff --git a/drivers/media/video/msm_vidc/vidc_hal.c b/drivers/media/video/msm_vidc/vidc_hal.c
index 646a0b8..16a3ecd 100644
--- a/drivers/media/video/msm_vidc/vidc_hal.c
+++ b/drivers/media/video/msm_vidc/vidc_hal.c
@@ -16,6 +16,8 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/delay.h>
+#include <mach/ocmem.h>
+
 #include <asm/memory.h>
 #include "vidc_hal.h"
 #include "vidc_hal_io.h"
@@ -749,12 +751,12 @@
 		struct hfi_resource_ocmem *hfioc_mem =
 			(struct hfi_resource_ocmem *)
 			&pkt->rg_resource_data[0];
-		struct vidc_mem_addr *vidc_oc_mem =
-			(struct vidc_mem_addr *) resource_value;
+		struct ocmem_buf *ocmem =
+			(struct ocmem_buf *) resource_value;
 
 		pkt->resource_type = HFI_RESOURCE_OCMEM;
-		hfioc_mem->size = (u32) vidc_oc_mem->mem_size;
-		hfioc_mem->mem = (u8 *) vidc_oc_mem->align_device_addr;
+		hfioc_mem->size = (u32) ocmem->len;
+		hfioc_mem->mem = (u8 *) ocmem->addr;
 		pkt->size += sizeof(struct hfi_resource_ocmem);
 		if (vidc_hal_iface_cmdq_write(dev, pkt))
 			rc = -ENOTEMPTY;
diff --git a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
index ded9f11..364faa9 100644
--- a/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
+++ b/drivers/media/video/msm_vidc/vidc_hal_interrupt_handler.c
@@ -252,6 +252,29 @@
 	device->callback(SYS_INIT_DONE, &cmd_done);
 }
 
+static void hal_process_sys_rel_resource_done(struct hal_device *device,
+	struct hfi_msg_sys_release_resource_done_packet *pkt)
+{
+	struct msm_vidc_cb_cmd_done cmd_done;
+	enum vidc_status status = VIDC_ERR_NONE;
+	u32 pkt_size;
+	memset(&cmd_done, 0, sizeof(struct msm_vidc_cb_cmd_done));
+	HAL_MSG_ERROR("RECEIVED:SYS_RELEASE_RESOURCE_DONE");
+	pkt_size = sizeof(struct hfi_msg_sys_release_resource_done_packet);
+	if (pkt_size > pkt->size) {
+		HAL_MSG_ERROR("hal_process_sys_rel_resource_done:bad size:%d",
+				pkt->size);
+		return;
+	}
+	status = vidc_map_hal_err_status((u32)pkt->error_type);
+	cmd_done.device_id = device->device_id;
+	cmd_done.session_id = 0;
+	cmd_done.status = (u32) status;
+	cmd_done.size = 0;
+	cmd_done.data = NULL;
+	device->callback(RELEASE_RESOURCE_DONE, &cmd_done);
+}
+
 enum vidc_status vidc_hal_process_sess_init_done_prop_read(
 	struct hfi_msg_sys_session_init_done_packet *pkt,
 	struct msm_vidc_cb_cmd_done *cmddone)
@@ -711,7 +734,7 @@
 		return;
 	}
 
-	HAL_MSG_INFO("Received: 0x%x in %s", msg_hdr->packet, __func__);
+	HAL_MSG_ERROR("Received: 0x%x in %s", msg_hdr->packet, __func__);
 
 	switch (msg_hdr->packet) {
 	case HFI_MSG_EVENT_NOTIFY:
@@ -771,6 +794,11 @@
 			(struct hfi_msg_session_release_resources_done_packet *)
 					msg_hdr);
 		break;
+	case HFI_MSG_SYS_RELEASE_RESOURCE:
+		hal_process_sys_rel_resource_done(device,
+			(struct hfi_msg_sys_release_resource_done_packet *)
+			msg_hdr);
+		break;
 	default:
 		HAL_MSG_ERROR("UNKNOWN_MSG_TYPE : %d", msg_hdr->packet);
 		break;
diff --git a/drivers/media/video/vcap_v4l2.c b/drivers/media/video/vcap_v4l2.c
index 01e1201..894860b 100644
--- a/drivers/media/video/vcap_v4l2.c
+++ b/drivers/media/video/vcap_v4l2.c
@@ -852,9 +852,6 @@
 		c_data->vp_in_fmt.height = priv_fmt->u.pix.height;
 		c_data->vp_in_fmt.pixfmt = priv_fmt->u.pix.pixelformat;
 
-		if (priv_fmt->u.pix.priv)
-			c_data->vid_vp_action.nr_enabled = 1;
-
 		size = c_data->vp_in_fmt.width * c_data->vp_in_fmt.height;
 		if (c_data->vp_in_fmt.pixfmt == V4L2_PIX_FMT_NV16)
 			size = size * 2;
@@ -868,9 +865,6 @@
 		c_data->vp_out_fmt.height = priv_fmt->u.pix.height;
 		c_data->vp_out_fmt.pixfmt = priv_fmt->u.pix.pixelformat;
 
-		if (priv_fmt->u.pix.priv)
-			c_data->vid_vp_action.nr_enabled = 1;
-
 		size = c_data->vp_out_fmt.width * c_data->vp_out_fmt.height;
 		if (c_data->vp_out_fmt.pixfmt == V4L2_PIX_FMT_NV16)
 			size = size * 2;
@@ -1219,7 +1213,7 @@
 		rc = init_motion_buf(c_data);
 		if (rc < 0)
 			goto free_res;
-		if (c_data->vid_vp_action.nr_enabled) {
+		if (c_data->vid_vp_action.nr_param.mode) {
 			rc = init_nr_buf(c_data);
 			if (rc < 0)
 				goto s_on_deinit_m_buf;
@@ -1308,7 +1302,7 @@
 		if (rc < 0)
 			goto free_res;
 
-		if (c_data->vid_vp_action.nr_enabled) {
+		if (c_data->vid_vp_action.nr_param.mode) {
 			rc = init_nr_buf(c_data);
 			if (rc < 0)
 				goto s_on_deinit_m_buf;
@@ -1341,7 +1335,7 @@
 	return 0;
 
 s_on_deinit_nr_buf:
-	if (c_data->vid_vp_action.nr_enabled)
+	if (c_data->vid_vp_action.nr_param.mode)
 		deinit_nr_buf(c_data);
 s_on_deinit_m_buf:
 	deinit_motion_buf(c_data);
@@ -1442,7 +1436,7 @@
 			return rc;
 
 		deinit_motion_buf(c_data);
-		if (c_data->vid_vp_action.nr_enabled)
+		if (c_data->vid_vp_action.nr_param.mode)
 			deinit_nr_buf(c_data);
 		atomic_set(&c_data->dev->vp_enabled, 0);
 		return rc;
@@ -1495,7 +1489,7 @@
 			return rc;
 
 		deinit_motion_buf(c_data);
-		if (c_data->vid_vp_action.nr_enabled)
+		if (c_data->vid_vp_action.nr_param.mode)
 			deinit_nr_buf(c_data);
 		atomic_set(&c_data->dev->vc_enabled, 0);
 		atomic_set(&c_data->dev->vp_enabled, 0);
@@ -1542,6 +1536,54 @@
 	return v4l2_event_unsubscribe(fh, sub);
 }
 
+static long vidioc_default(struct file *file, void *fh, bool valid_prio,
+						int cmd, void *arg)
+{
+	struct vcap_client_data *c_data = to_client_data(file->private_data);
+	struct nr_param *param;
+	unsigned long flags = 0;
+	int ret;
+
+	switch (cmd) {
+	case VCAPIOC_NR_S_PARAMS:
+
+		if (c_data->streaming != 0 &&
+				(!!((struct nr_param *) arg)->mode !=
+				!!c_data->vid_vp_action.nr_param.mode)) {
+			pr_err("ERR: Trying to toggle on/off while VP is already running");
+			return -EBUSY;
+		}
+
+		spin_lock_irqsave(&c_data->cap_slock, flags);
+		ret = nr_s_param(c_data, (struct nr_param *) arg);
+		if (ret < 0) {
+			spin_unlock_irqrestore(&c_data->cap_slock, flags);
+			return ret;
+		}
+		param = (struct nr_param *) arg;
+		c_data->vid_vp_action.nr_param = *param;
+		if (param->mode == NR_AUTO)
+			s_default_nr_val(&c_data->vid_vp_action.nr_param);
+		c_data->vid_vp_action.nr_update = true;
+		spin_unlock_irqrestore(&c_data->cap_slock, flags);
+		break;
+	case VCAPIOC_NR_G_PARAMS:
+		*((struct nr_param *)arg) = c_data->vid_vp_action.nr_param;
+		if (c_data->vid_vp_action.nr_param.mode != NR_DISABLE) {
+			if (c_data->streaming)
+				nr_g_param(c_data, (struct nr_param *) arg);
+			else
+				(*(struct nr_param *) arg) =
+					c_data->vid_vp_action.nr_param;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /* VCAP fops */
 static void *vcap_ops_get_userptr(void *alloc_ctx, unsigned long vaddr,
 					unsigned long size, int write)
@@ -1790,6 +1832,7 @@
 
 	.vidioc_subscribe_event = vidioc_subscribe_event,
 	.vidioc_unsubscribe_event = vidioc_unsubscribe_event,
+	.vidioc_default = vidioc_default,
 };
 
 static struct video_device vcap_template = {
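
The guard in vidioc_default() above only cares whether the noise-reduction mode is being toggled between off and non-off while the VP pipeline is streaming, so it compares the old and new mode values after collapsing each to 0 or 1. A minimal standalone illustration of that double-negation idiom (the helper name is hypothetical):

	/* True (1) when exactly one of a and b is zero, i.e. an on/off toggle. */
	static inline int nr_mode_toggles(int a, int b)
	{
		return !!a != !!b;	/* !! normalizes any non-zero value to 1 */
	}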
diff --git a/drivers/media/video/vcap_vp.c b/drivers/media/video/vcap_vp.c
index b73185d..f1f1c69 100644
--- a/drivers/media/video/vcap_vp.c
+++ b/drivers/media/video/vcap_vp.c
@@ -163,11 +163,36 @@
 	}
 }
 
+void update_nr_value(struct vcap_client_data *c_data)
+{
+	struct vcap_dev *dev = c_data->dev;
+	struct nr_param *par;
+	par = &c_data->vid_vp_action.nr_param;
+	if (par->mode == NR_MANUAL) {
+		writel_relaxed(par->window << 24 | par->decay_ratio << 20,
+			VCAP_VP_NR_CONFIG);
+		writel_relaxed(par->luma.max_blend_ratio << 24 |
+			par->luma.scale_diff_ratio << 12 |
+			par->luma.diff_limit_ratio << 8  |
+			par->luma.scale_motion_ratio << 4 |
+			par->luma.blend_limit_ratio << 0,
+			VCAP_VP_NR_LUMA_CONFIG);
+		writel_relaxed(par->chroma.max_blend_ratio << 24 |
+			par->chroma.scale_diff_ratio << 12 |
+			par->chroma.diff_limit_ratio << 8  |
+			par->chroma.scale_motion_ratio << 4 |
+			par->chroma.blend_limit_ratio << 0,
+			VCAP_VP_NR_CHROMA_CONFIG);
+	}
+	c_data->vid_vp_action.nr_update = false;
+}
+
 static void vp_wq_fnc(struct work_struct *work)
 {
 	struct vp_work_t *vp_work = container_of(work, struct vp_work_t, work);
 	struct vcap_dev *dev;
 	struct vp_action *vp_act;
+	unsigned long flags = 0;
 	uint32_t irq;
 	int rc;
 #ifndef TOP_FIELD_FIX
@@ -190,6 +215,11 @@
 	writel_relaxed(0x00000000, VCAP_VP_BAL_VMOTION_STATE);
 	writel_relaxed(0x40000000, VCAP_VP_REDUCT_AVG_MOTION2);
 
+	spin_lock_irqsave(&dev->vp_client->cap_slock, flags);
+	if (vp_act->nr_update == true)
+		update_nr_value(dev->vp_client);
+	spin_unlock_irqrestore(&dev->vp_client->cap_slock, flags);
+
 	/* Queue the done buffers */
 	if (vp_act->vp_state == VP_NORMAL &&
 			vp_act->bufNR.nr_pos != TM1_BUF) {
@@ -208,7 +238,7 @@
 #endif
 
 	/* Cycle Buffers*/
-	if (vp_work->cd->vid_vp_action.nr_enabled) {
+	if (vp_work->cd->vid_vp_action.nr_param.mode) {
 		if (vp_act->bufNR.nr_pos == TM1_BUF)
 			vp_act->bufNR.nr_pos = BUF_NOT_IN_USE;
 
@@ -453,6 +483,8 @@
 	if (!buf->vaddr)
 		return -ENOMEM;
 
+	update_nr_value(c_data);
+
 	buf->paddr = virt_to_phys(buf->vaddr);
 	rc = readl_relaxed(VCAP_VP_NR_CONFIG2);
 	rc |= 0x02D00001;
@@ -486,6 +518,76 @@
 	return;
 }
 
+int nr_s_param(struct vcap_client_data *c_data, struct nr_param *param)
+{
+	if (param->mode != NR_MANUAL)
+		return 0;
+
+	/* Verify values are in range */
+	if (param->window > VP_NR_MAX_WINDOW)
+		return -EINVAL;
+	if (param->luma.max_blend_ratio > VP_NR_MAX_RATIO)
+		return -EINVAL;
+	if (param->luma.scale_diff_ratio > VP_NR_MAX_RATIO)
+		return -EINVAL;
+	if (param->luma.diff_limit_ratio > VP_NR_MAX_RATIO)
+		return -EINVAL;
+	if (param->luma.scale_motion_ratio > VP_NR_MAX_RATIO)
+		return -EINVAL;
+	if (param->luma.blend_limit_ratio > VP_NR_MAX_RATIO)
+		return -EINVAL;
+	if (param->chroma.max_blend_ratio > VP_NR_MAX_RATIO)
+		return -EINVAL;
+	if (param->chroma.scale_diff_ratio > VP_NR_MAX_RATIO)
+		return -EINVAL;
+	if (param->chroma.diff_limit_ratio > VP_NR_MAX_RATIO)
+		return -EINVAL;
+	if (param->chroma.scale_motion_ratio > VP_NR_MAX_RATIO)
+		return -EINVAL;
+	if (param->chroma.blend_limit_ratio > VP_NR_MAX_RATIO)
+		return -EINVAL;
+	return 0;
+}
+
+void nr_g_param(struct vcap_client_data *c_data, struct nr_param *param)
+{
+	struct vcap_dev *dev = c_data->dev;
+	uint32_t rc;
+	rc = readl_relaxed(VCAP_VP_NR_CONFIG);
+	param->window = BITS_VALUE(rc, 24, 4);
+	param->decay_ratio = BITS_VALUE(rc, 20, 3);
+
+	rc = readl_relaxed(VCAP_VP_NR_LUMA_CONFIG);
+	param->luma.max_blend_ratio = BITS_VALUE(rc, 24, 4);
+	param->luma.scale_diff_ratio = BITS_VALUE(rc, 12, 4);
+	param->luma.diff_limit_ratio = BITS_VALUE(rc, 8, 4);
+	param->luma.scale_motion_ratio = BITS_VALUE(rc, 4, 4);
+	param->luma.blend_limit_ratio = BITS_VALUE(rc, 0, 4);
+
+	rc = readl_relaxed(VCAP_VP_NR_CHROMA_CONFIG);
+	param->chroma.max_blend_ratio = BITS_VALUE(rc, 24, 4);
+	param->chroma.scale_diff_ratio = BITS_VALUE(rc, 12, 4);
+	param->chroma.diff_limit_ratio = BITS_VALUE(rc, 8, 4);
+	param->chroma.scale_motion_ratio = BITS_VALUE(rc, 4, 4);
+	param->chroma.blend_limit_ratio = BITS_VALUE(rc, 0, 4);
+}
+
+void s_default_nr_val(struct nr_param *param)
+{
+	param->window = 10;
+	param->decay_ratio = 0;
+	param->luma.max_blend_ratio = 0;
+	param->luma.scale_diff_ratio = 4;
+	param->luma.diff_limit_ratio = 1;
+	param->luma.scale_motion_ratio = 4;
+	param->luma.blend_limit_ratio = 9;
+	param->chroma.max_blend_ratio = 0;
+	param->chroma.scale_diff_ratio = 4;
+	param->chroma.diff_limit_ratio = 1;
+	param->chroma.scale_motion_ratio = 4;
+	param->chroma.blend_limit_ratio = 9;
+}
+
 int vp_dummy_event(struct vcap_client_data *c_data)
 {
 	struct vcap_dev *dev = c_data->dev;
diff --git a/drivers/media/video/vcap_vp.h b/drivers/media/video/vcap_vp.h
index 5c32903..b2b00e9 100644
--- a/drivers/media/video/vcap_vp.h
+++ b/drivers/media/video/vcap_vp.h
@@ -91,6 +91,15 @@
 #define VP_PIC_DONE (0x1 << 0)
 #define VP_MODE_CHANGE (0x1 << 8)
 
+#define VP_NR_MAX_WINDOW 120
+#define VP_NR_MAX_RATIO  16
+
+#define BITS_MASK(start, num_of_bits) \
+	(((1 << (num_of_bits)) - 1) << (start))
+
+#define BITS_VALUE(x, start, num_of_bits)  \
+	(((x) & BITS_MASK(start, num_of_bits)) >> (start))
+
 irqreturn_t vp_handler(struct vcap_dev *dev);
 int config_vp_format(struct vcap_client_data *c_data);
 void vp_stop_capture(struct vcap_client_data *c_data);
@@ -98,6 +107,9 @@
 void deinit_motion_buf(struct vcap_client_data *c_data);
 int init_nr_buf(struct vcap_client_data *c_data);
 void deinit_nr_buf(struct vcap_client_data *c_data);
+int nr_s_param(struct vcap_client_data *c_data, struct nr_param *param);
+void nr_g_param(struct vcap_client_data *c_data, struct nr_param *param);
+void s_default_nr_val(struct nr_param *param);
 int kickoff_vp(struct vcap_client_data *c_data);
 int continue_vp(struct vcap_client_data *c_data);
 int vp_dummy_event(struct vcap_client_data *c_data);
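
BITS_MASK()/BITS_VALUE() added above are the helpers nr_g_param() uses to unpack the fields that update_nr_value() packs into the VCAP_VP_NR_*_CONFIG registers. A small worked example of the extraction (the register value is made up for illustration):

	uint32_t val = 0x0A400000;			/* hypothetical VCAP_VP_NR_CONFIG readback */
	uint32_t window = BITS_VALUE(val, 24, 4);	/* (val & 0x0F000000) >> 24 == 0xA */
	uint32_t decay  = BITS_VALUE(val, 20, 3);	/* (val & 0x00700000) >> 20 == 0x4 */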
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 2256f67..90673fc5 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -686,7 +686,7 @@
 			const struct i2c_device_id *id)
 {
 	struct wcd9xxx *wcd9xxx;
-	struct wcd9xxx_pdata *pdata = client->dev.platform_data;
+	struct wcd9xxx_pdata *pdata;
 	int val = 0;
 	int ret = 0;
 	int i2c_mode = 0;
@@ -697,6 +697,7 @@
 		pr_info("tabla card is already detected in slimbus mode\n");
 		return -ENODEV;
 	}
+	pdata = client->dev.platform_data;
 	if (device_id > 0) {
 		wcd9xxx_modules[device_id++].client = client;
 		pr_info("probe for other slaves devices of tabla\n");
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a496df0..c785a7e 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1231,6 +1231,7 @@
 	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 	brq->data.blocks = blk_rq_sectors(req);
 
+	brq->data.fault_injected = false;
 	/*
 	 * The block layer doesn't support all sector count
 	 * restrictions, so we need to be prepared for too big
@@ -1664,6 +1665,7 @@
 	brq->data.blksz = 512;
 	brq->data.blocks = mqrq->packed_blocks + 1;
 	brq->data.flags |= MMC_DATA_WRITE;
+	brq->data.fault_injected = false;
 
 	brq->stop.opcode = MMC_STOP_TRANSMISSION;
 	brq->stop.arg = 0;
@@ -1708,11 +1710,12 @@
 	 */
 	if (mmc_card_sd(card)) {
 		u32 blocks;
-
-		blocks = mmc_sd_num_wr_blocks(card);
-		if (blocks != (u32)-1) {
-			ret = blk_end_request(req, 0, blocks << 9);
-		}
+		if (!brq->data.fault_injected) {
+			blocks = mmc_sd_num_wr_blocks(card);
+			if (blocks != (u32)-1)
+				ret = blk_end_request(req, 0, blocks << 9);
+		} else
+			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
 	} else {
 		if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
 			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 2479fcf..d13b914 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -119,6 +119,7 @@
 
 	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
 	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
+	data->fault_injected = true;
 }
 
 #else /* CONFIG_FAIL_MMC_REQUEST */
@@ -369,7 +370,7 @@
 	struct mmc_command *cmd;
 
 	while (1) {
-		wait_for_completion(&mrq->completion);
+		wait_for_completion_io(&mrq->completion);
 
 		cmd = mrq->cmd;
 		if (!cmd->error || !cmd->retries ||
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 08f5ab9..36f87df 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -400,6 +400,54 @@
 		host->dummy_52_needed = 0;
 }
 
+static void msmsdcc_reset_dpsm(struct msmsdcc_host *host)
+{
+	struct mmc_request *mrq = host->curr.mrq;
+
+	if (!mrq || !mrq->cmd || (!mrq->data && !host->pending_dpsm_reset))
+			goto out;
+
+	/*
+	 * For CMD24, if auto prog done is not supported, defer the
+	 * DPSM reset until prog done is received. Otherwise we would
+	 * poll here unnecessarily, since TXACTIVE is not deasserted
+	 * until DAT0 goes high.
+	 */
+	if ((mrq->cmd->opcode == MMC_WRITE_BLOCK) && !is_auto_prog_done(host)) {
+		host->pending_dpsm_reset = true;
+		goto out;
+	}
+
+	/* Make sure h/w (TX/RX) is inactive before resetting DPSM */
+	if (is_wait_for_tx_rx_active(host)) {
+		ktime_t start = ktime_get();
+
+		while (readl_relaxed(host->base + MMCISTATUS) &
+				(MCI_TXACTIVE | MCI_RXACTIVE)) {
+			/*
+			 * TX/RX active bits may be asserted for 4HCLK + 4MCLK
+			 * cycles (~11us) after data transfer due to clock mux
+			 * switching delays. Let's poll for 1ms and panic if
+			 * still active.
+			 */
+			if (ktime_to_us(ktime_sub(ktime_get(), start)) > 1000) {
+				pr_err("%s: %s still active\n",
+					mmc_hostname(host->mmc),
+					readl_relaxed(host->base + MMCISTATUS)
+					& MCI_TXACTIVE ? "TX" : "RX");
+				msmsdcc_dump_sdcc_state(host);
+				BUG();
+			}
+		}
+	}
+
+	writel_relaxed(0, host->base + MMCIDATACTRL);
+	msmsdcc_sync_reg_wr(host); /* Allow the DPSM to be reset */
+	host->pending_dpsm_reset = false;
+out:
+	return;
+}
+
 static int
 msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
 {
@@ -414,6 +462,8 @@
 	if (mrq->cmd->error == -ETIMEDOUT)
 		mdelay(5);
 
+	msmsdcc_reset_dpsm(host);
+
 	/* Clear current request information as current request has ended */
 	memset(&host->curr, 0, sizeof(struct msmsdcc_curr_req));
 
@@ -435,8 +485,6 @@
 	host->curr.got_dataend = 0;
 	host->curr.wait_for_auto_prog_done = false;
 	host->curr.got_auto_prog_done = false;
-	writel_relaxed(0, host->base + MMCIDATACTRL);
-	msmsdcc_sync_reg_wr(host); /* Allow the DPSM to be reset */
 }
 
 static inline uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
@@ -582,6 +630,7 @@
 		if (!mrq->data->stop || mrq->cmd->error ||
 			(mrq->sbc && !mrq->data->error)) {
 			mrq->data->bytes_xfered = host->curr.data_xfered;
+			msmsdcc_reset_dpsm(host);
 			del_timer(&host->req_tout_timer);
 			/*
 			 * Clear current request information as current
@@ -742,6 +791,7 @@
 		if (!mrq->data->stop || mrq->cmd->error ||
 			(mrq->sbc && !mrq->data->error)) {
 			mrq->data->bytes_xfered = host->curr.data_xfered;
+			msmsdcc_reset_dpsm(host);
 			del_timer(&host->req_tout_timer);
 			/*
 			 * Clear current request information as current
@@ -1136,13 +1186,19 @@
 		}
 	}
 
-	/* Clear CDR_EN bit for write operations */
-	if (host->tuning_needed && cmd->mrq->data &&
-			(cmd->mrq->data->flags & MMC_DATA_WRITE))
-		writel_relaxed((readl_relaxed(host->base + MCI_DLL_CONFIG) &
-				~MCI_CDR_EN), host->base + MCI_DLL_CONFIG);
+	if (cmd->mrq->data && (cmd->mrq->data->flags & MMC_DATA_READ))
+		writel_relaxed((readl_relaxed(host->base +
+				MCI_DLL_CONFIG) | MCI_CDR_EN),
+				host->base + MCI_DLL_CONFIG);
+	else
+		/* Clear CDR_EN bit for non read operations */
+		writel_relaxed((readl_relaxed(host->base +
+				MCI_DLL_CONFIG) & ~MCI_CDR_EN),
+				host->base + MCI_DLL_CONFIG);
 
-	if ((cmd->flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+	if (((cmd->flags & MMC_RSP_R1B) == MMC_RSP_R1B) ||
+			(cmd->opcode == MMC_SEND_STATUS &&
+			 !(cmd->flags & MMC_CMD_ADTC))) {
 		*c |= MCI_CPSM_PROGENA;
 		host->prog_enable = 1;
 	}
@@ -3083,8 +3139,10 @@
 	 * Select the controller timing mode according
 	 * to current bus speed mode
 	 */
-	if ((ios->timing == MMC_TIMING_UHS_SDR104) ||
-		(ios->timing == MMC_TIMING_MMC_HS200)) {
+	if (host->clk_rate > (100 * 1000 * 1000) &&
+	    (ios->timing == MMC_TIMING_UHS_SDR104 ||
+	    ios->timing == MMC_TIMING_MMC_HS200)) {
+		/* Card clock frequency must be > 100MHz to enable tuning */
 		clk |= (4 << 14);
 		host->tuning_needed = 1;
 	} else if (ios->timing == MMC_TIMING_UHS_DDR50) {
@@ -3165,7 +3223,7 @@
 
 	if (host->plat->wpswitch) {
 		status = host->plat->wpswitch(mmc_dev(mmc));
-	} else if (host->plat->wpswitch_gpio) {
+	} else if (gpio_is_valid(host->plat->wpswitch_gpio)) {
 		status = gpio_request(host->plat->wpswitch_gpio,
 					"SD_WP_Switch");
 		if (status) {
@@ -3971,7 +4029,7 @@
 	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
 	unsigned int status;
 
-	if (host->plat->status || host->plat->status_gpio) {
+	if (host->plat->status || gpio_is_valid(host->plat->status_gpio)) {
 		if (host->plat->status)
 			status = host->plat->status(mmc_dev(host->mmc));
 		else
@@ -5008,6 +5066,25 @@
 	return ret;
 }
 
+static void msmsdcc_dt_get_cd_wp_gpio(struct device *dev,
+		struct mmc_platform_data *pdata)
+{
+	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
+	struct device_node *np = dev->of_node;
+
+	pdata->status_gpio = of_get_named_gpio_flags(np,
+			"cd-gpios", 0, &flags);
+	if (gpio_is_valid(pdata->status_gpio)) {
+		pdata->status_irq = gpio_to_irq(pdata->status_gpio);
+		pdata->is_status_gpio_active_low = flags & OF_GPIO_ACTIVE_LOW;
+	}
+
+	pdata->wpswitch_gpio = of_get_named_gpio_flags(np,
+			"wp-gpios", 0, &flags);
+	if (gpio_is_valid(pdata->wpswitch_gpio))
+		pdata->is_wpswitch_active_low = flags & OF_GPIO_ACTIVE_LOW;
+}
+
 static int msmsdcc_dt_parse_gpio_info(struct device *dev,
 		struct mmc_platform_data *pdata)
 {
@@ -5015,6 +5092,8 @@
 	struct msm_mmc_pin_data *pin_data;
 	struct device_node *np = dev->of_node;
 
+	msmsdcc_dt_get_cd_wp_gpio(dev, pdata);
+
 	pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
 	if (!pin_data) {
 		dev_err(dev, "No memory for pin_data\n");
@@ -5656,7 +5735,12 @@
 	 * Setup card detect change
 	 */
 
-	if (plat->status || plat->status_gpio) {
+	if (!plat->status_gpio)
+		plat->status_gpio = -ENOENT;
+	if (!plat->wpswitch_gpio)
+		plat->wpswitch_gpio = -ENOENT;
+
+	if (plat->status || gpio_is_valid(plat->status_gpio)) {
 		if (plat->status)
 			host->oldstat = plat->status(mmc_dev(host->mmc));
 		else
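
The card-detect and write-protect GPIOs are now treated as optional throughout msm_sdcc.c: of_get_named_gpio_flags() returns a negative errno when the property is absent, every truthiness check is replaced by gpio_is_valid() (GPIO 0 is a valid number, negative values are not), and legacy platform data that never set the fields is normalized to -ENOENT. A condensed sketch of that pattern, assuming only the standard of_gpio helpers (the function name is hypothetical):

	#include <linux/gpio.h>
	#include <linux/of_gpio.h>

	static int example_get_optional_gpio(struct device_node *np,
					const char *prop, bool *active_low)
	{
		enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
		int gpio = of_get_named_gpio_flags(np, prop, 0, &flags);

		if (!gpio_is_valid(gpio))
			return -ENOENT;		/* property absent: stays optional */

		*active_low = !!(flags & OF_GPIO_ACTIVE_LOW);
		return gpio;
	}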
diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h
index cc41c46..3b1dbc72 100644
--- a/drivers/mmc/host/msm_sdcc.h
+++ b/drivers/mmc/host/msm_sdcc.h
@@ -412,6 +412,7 @@
 	struct mutex clk_mutex;
 	bool pending_resume;
 	unsigned int idle_tout_ms;			/* Timeout in msecs */
+	bool pending_dpsm_reset;
 	struct msmsdcc_msm_bus_vote msm_bus_vote;
 	struct device_attribute	max_bus_bw;
 	struct device_attribute	polling;
@@ -426,6 +427,7 @@
 #define MSMSDCC_REG_WR_ACTIVE	(1 << 4)
 #define MSMSDCC_SW_RST		(1 << 5)
 #define MSMSDCC_SW_RST_CFG	(1 << 6)
+#define MSMSDCC_WAIT_FOR_TX_RX	(1 << 7)
 
 #define set_hw_caps(h, val)		((h)->hw_caps |= val)
 #define is_sps_mode(h)			((h)->hw_caps & MSMSDCC_SPS_BAM_SUP)
@@ -435,6 +437,7 @@
 #define is_wait_for_reg_write(h)	((h)->hw_caps & MSMSDCC_REG_WR_ACTIVE)
 #define is_sw_hard_reset(h)		((h)->hw_caps & MSMSDCC_SW_RST)
 #define is_sw_reset_save_config(h)	((h)->hw_caps & MSMSDCC_SW_RST_CFG)
+#define is_wait_for_tx_rx_active(h)	((h)->hw_caps & MSMSDCC_WAIT_FOR_TX_RX)
 
 /* Set controller capabilities based on version */
 static inline void set_default_hw_caps(struct msmsdcc_host *host)
@@ -453,7 +456,8 @@
 	version &= MSMSDCC_VERSION_MASK;
 	if (version) /* SDCC v4 and greater */
 		host->hw_caps |= MSMSDCC_AUTO_PROG_DONE |
-			MSMSDCC_SOFT_RESET | MSMSDCC_REG_WR_ACTIVE;
+			MSMSDCC_SOFT_RESET | MSMSDCC_REG_WR_ACTIVE
+			| MSMSDCC_WAIT_FOR_TX_RX;
 
 	if (version >= 0x2D) /* SDCC v4 2.1.0 and greater */
 		host->hw_caps |= MSMSDCC_SW_RST | MSMSDCC_SW_RST_CFG;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 21f146f..bc05764 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -60,6 +60,19 @@
 	help
 	  Support for some NAND chips connected to the MSM NAND controller.
 
+config MTD_MSM_QPIC_NAND
+	tristate "MSM QPIC NAND Device Support"
+	depends on MTD && ARCH_MSM && !MTD_MSM_NAND
+	select CRC16
+	select BITREVERSE
+	select MTD_NAND_IDS
+	default n
+	help
+	  Support for the NAND controller in the Qualcomm Parallel
+	  Interface Controller (QPIC). This controller supports BAM mode
+	  and BCH error correction. Based on the device capabilities,
+	  either 4-bit or 8-bit BCH ECC will be used.
+
 config MTD_DATAFLASH
 	tristate "Support for AT45xxx DataFlash"
 	depends on SPI_MASTER && EXPERIMENTAL
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 8497c5f..9fdd004 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -13,6 +13,7 @@
 obj-$(CONFIG_MTD_PMC551)	+= pmc551.o
 obj-$(CONFIG_MTD_MS02NV)	+= ms02-nv.o
 obj-$(CONFIG_MTD_MSM_NAND)	+= msm_nand.o
+obj-$(CONFIG_MTD_MSM_QPIC_NAND)	+= msm_qpic_nand.o
 obj-$(CONFIG_MTD_MTDRAM)	+= mtdram.o
 obj-$(CONFIG_MTD_LART)		+= lart.o
 obj-$(CONFIG_MTD_BLOCK2MTD)	+= block2mtd.o
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
new file mode 100644
index 0000000..d709e17
--- /dev/null
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -0,0 +1,2500 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/crc16.h>
+#include <linux/bitrev.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <mach/sps.h>
+
+#define PAGE_SIZE_2K 2048
+#define PAGE_SIZE_4K 4096
+#define WRITE 1
+#define READ 0
+/*
+ * The maximum number of descriptors per transfer (page read/write) will not
+ * exceed 64. For details on the commands involved, refer to the page read
+ * and page write functions in this driver.
+ */
+#define SPS_MAX_DESC_NUM 64
+#define SPS_DATA_CONS_PIPE_INDEX 0
+#define SPS_DATA_PROD_PIPE_INDEX 1
+#define SPS_CMD_CONS_PIPE_INDEX 2
+
+#define msm_virt_to_dma(chip, vaddr) \
+	((chip)->dma_phys_addr + \
+	((uint8_t *)(vaddr) - (chip)->dma_virt_addr))
+
+/*
+ * A single page read/write request typically needs about 1K of DMA memory,
+ * so this pool is more than enough for one request.
+ *
+ * To accommodate multiple clients, 8K of memory is allocated. Although only
+ * one client request can be submitted to the NANDc at any time, other
+ * clients can still prepare their descriptors while waiting for the current
+ * request to complete. With a total of 8K, the driver can therefore serve
+ * roughly 7 or 8 clients at a time. A client for which no free DMA memory is
+ * available waits on the wait queue until other clients free up the required
+ * memory.
+ */
+#define MSM_NAND_DMA_BUFFER_SIZE SZ_8K
+/*
+ * This defines the granularity at which buffer management is done. The total
+ * number of slots is based on the size (in bits) of the atomic_t variable
+ * dma_buffer_busy within struct msm_nand_chip.
+ */
+#define MSM_NAND_DMA_BUFFER_SLOT_SZ \
+	(MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
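+
+/*
+ * Worked example (assuming a 32-bit atomic_t counter): with an 8K pool and
+ * 32 slot bits, each slot covers SZ_8K / 32 = 256 bytes, so every request is
+ * rounded up to a multiple of 256 bytes and tracked by a run of contiguous
+ * bits in dma_buffer_busy.
+ */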
+
+/* ONFI (Open NAND Flash Interface) parameters */
+#define MSM_NAND_CFG0_RAW_ONFI_IDENTIFIER 0x88000800
+#define MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO 0x88040000
+#define MSM_NAND_CFG1_RAW_ONFI_IDENTIFIER 0x0005045d
+#define MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO 0x0005045d
+#define ONFI_PARAM_INFO_LENGTH 0x0200
+#define ONFI_PARAM_PAGE_LENGTH 0x0100
+#define ONFI_PARAMETER_PAGE_SIGNATURE 0x49464E4F
+#define FLASH_READ_ONFI_SIGNATURE_ADDRESS 0x20
+#define FLASH_READ_ONFI_PARAMETERS_COMMAND 0xEC
+#define FLASH_READ_ONFI_PARAMETERS_ADDRESS 0x00
+#define FLASH_READ_DEVICE_ID_ADDRESS 0x00
+
+#define MSM_NAND_RESET_FLASH_STS 0x00000020
+#define MSM_NAND_RESET_READ_STS 0x000000C0
+
+/* QPIC NANDc (NAND Controller) Register Set */
+#define MSM_NAND_REG(info, off)		    (info->nand_phys + off)
+#define MSM_NAND_FLASH_CMD(info)	    MSM_NAND_REG(info, 0x30000)
+#define MSM_NAND_ADDR0(info)                MSM_NAND_REG(info, 0x30004)
+#define MSM_NAND_ADDR1(info)                MSM_NAND_REG(info, 0x30008)
+#define MSM_NAND_EXEC_CMD(info)             MSM_NAND_REG(info, 0x30010)
+#define MSM_NAND_FLASH_STATUS(info)         MSM_NAND_REG(info, 0x30014)
+#define FS_OP_ERR (1 << 4)
+#define FS_MPU_ERR (1 << 8)
+#define FS_DEVICE_STS_ERR (1 << 16)
+#define FS_DEVICE_WP (1 << 23)
+
+#define MSM_NAND_BUFFER_STATUS(info)        MSM_NAND_REG(info, 0x30018)
+#define BS_UNCORRECTABLE_BIT (1 << 8)
+#define BS_CORRECTABLE_ERR_MSK 0x1F
+
+#define MSM_NAND_DEV0_CFG0(info)            MSM_NAND_REG(info, 0x30020)
+#define DISABLE_STATUS_AFTER_WRITE 4
+#define CW_PER_PAGE	6
+#define UD_SIZE_BYTES	9
+#define SPARE_SIZE_BYTES 23
+#define NUM_ADDR_CYCLES	27
+
+#define MSM_NAND_DEV0_CFG1(info)            MSM_NAND_REG(info, 0x30024)
+#define DEV0_CFG1_ECC_DISABLE	0
+#define WIDE_FLASH		1
+#define NAND_RECOVERY_CYCLES	2
+#define CS_ACTIVE_BSY		5
+#define BAD_BLOCK_BYTE_NUM	6
+#define BAD_BLOCK_IN_SPARE_AREA 16
+#define WR_RD_BSY_GAP		17
+#define ENABLE_BCH_ECC		27
+
+#define MSM_NAND_DEV0_ECC_CFG(info)	    MSM_NAND_REG(info, 0x30028)
+#define ECC_CFG_ECC_DISABLE	0
+#define ECC_SW_RESET	1
+#define ECC_MODE	4
+#define ECC_PARITY_SIZE_BYTES 8
+#define ECC_NUM_DATA_BYTES 16
+#define ECC_FORCE_CLK_OPEN 30
+
+#define MSM_NAND_READ_ID(info)              MSM_NAND_REG(info, 0x30040)
+#define MSM_NAND_READ_STATUS(info)          MSM_NAND_REG(info, 0x30044)
+#define MSM_NAND_DEV_CMD1(info)             MSM_NAND_REG(info, 0x300A4)
+#define MSM_NAND_DEV_CMD_VLD(info)          MSM_NAND_REG(info, 0x300AC)
+#define MSM_NAND_EBI2_ECC_BUF_CFG(info)     MSM_NAND_REG(info, 0x300F0)
+#define MSM_NAND_ERASED_CW_DETECT_CFG(info)	MSM_NAND_REG(info, 0x300E8)
+#define MSM_NAND_ERASED_CW_DETECT_STATUS(info)  MSM_NAND_REG(info, 0x300EC)
+
+#define MSM_NAND_CTRL(info)		    MSM_NAND_REG(info, 0x30F00)
+#define BAM_MODE_EN	0
+
+#define MSM_NAND_READ_LOCATION_0(info)      MSM_NAND_REG(info, 0x30F20)
+#define MSM_NAND_READ_LOCATION_1(info)      MSM_NAND_REG(info, 0x30F24)
+
+/* device commands */
+#define MSM_NAND_CMD_PAGE_READ          0x32
+#define MSM_NAND_CMD_PAGE_READ_ECC      0x33
+#define MSM_NAND_CMD_PAGE_READ_ALL      0x34
+#define MSM_NAND_CMD_PRG_PAGE           0x36
+#define MSM_NAND_CMD_PRG_PAGE_ECC       0x37
+#define MSM_NAND_CMD_PRG_PAGE_ALL       0x39
+#define MSM_NAND_CMD_BLOCK_ERASE        0x3A
+#define MSM_NAND_CMD_FETCH_ID           0x0B
+
+/* Structure that defines a NAND SPS command element */
+struct msm_nand_sps_cmd {
+	struct sps_command_element ce;
+	uint32_t flags;
+};
+
+/*
+ * Structure that defines the NAND controller properties as per the
+ * NAND flash device/chip that is attached.
+ */
+struct msm_nand_chip {
+	struct device *dev;
+	/*
+	 * DMA memory is allocated only once, during probe, and this
+	 * memory is shared by all NAND clients. This wait queue makes
+	 * clients wait for DMA memory to be freed when the entire pool
+	 * is in use.
+	 */
+	wait_queue_head_t dma_wait_queue;
+	atomic_t dma_buffer_busy;
+	uint8_t *dma_virt_addr;
+	dma_addr_t dma_phys_addr;
+	uint32_t ecc_parity_bytes;
+	uint32_t bch_caps; /* Controller BCH ECC capabilities */
+#define MSM_NAND_CAP_4_BIT_BCH      (1 << 0)
+#define MSM_NAND_CAP_8_BIT_BCH      (1 << 1)
+	uint32_t cw_size;
+	/* NANDc register configurations */
+	uint32_t cfg0, cfg1, cfg0_raw, cfg1_raw;
+	uint32_t ecc_buf_cfg;
+	uint32_t ecc_bch_cfg;
+};
+
+/* Structure that defines an SPS end point for a NANDc BAM pipe. */
+struct msm_nand_sps_endpt {
+	struct sps_pipe *handle;
+	struct sps_connect config;
+	struct sps_register_event event;
+	struct completion completion;
+};
+
+/*
+ * Structure that defines NANDc SPS data - BAM handle and an end point
+ * for each BAM pipe.
+ */
+struct msm_nand_sps_info {
+	uint32_t bam_handle;
+	struct msm_nand_sps_endpt data_prod;
+	struct msm_nand_sps_endpt data_cons;
+	struct msm_nand_sps_endpt cmd_pipe;
+};
+
+/*
+ * Structure that contains flash device information. This gets updated after
+ * the NAND flash device detection.
+ */
+struct flash_identification {
+	uint32_t flash_id;
+	uint32_t density;
+	uint32_t widebus;
+	uint32_t pagesize;
+	uint32_t blksize;
+	uint32_t oobsize;
+	uint32_t ecc_correctability;
+};
+
+/* Structure that defines NANDc private data. */
+struct msm_nand_info {
+	struct mtd_info		mtd;
+	struct msm_nand_chip	nand_chip;
+	struct msm_nand_sps_info sps;
+	unsigned long bam_phys;
+	unsigned long nand_phys;
+	void __iomem *bam_base;
+	int bam_irq;
+	/*
+	 * This lock must be acquired before submitting any command or data
+	 * descriptors to BAM pipes and must be held until all the submitted
+	 * descriptors are processed.
+	 *
+	 * This is required to ensure that the command and data descriptors of
+	 * a request are submitted atomically, without interruption from other
+	 * clients, when requests from more than one client are pending.
+	 * Otherwise, the data and command descriptors of a request could be
+	 * submitted out of order, which can cause data corruption.
+	 */
+	struct mutex bam_lock;
+	struct flash_identification flash_dev;
+};
+
+/* Structure that defines an ONFI parameter page (512B) */
+struct onfi_param_page {
+	uint32_t parameter_page_signature;
+	uint16_t revision_number;
+	uint16_t features_supported;
+	uint16_t optional_commands_supported;
+	uint8_t  reserved0[22];
+	uint8_t  device_manufacturer[12];
+	uint8_t  device_model[20];
+	uint8_t  jedec_manufacturer_id;
+	uint16_t date_code;
+	uint8_t  reserved1[13];
+	uint32_t number_of_data_bytes_per_page;
+	uint16_t number_of_spare_bytes_per_page;
+	uint32_t number_of_data_bytes_per_partial_page;
+	uint16_t number_of_spare_bytes_per_partial_page;
+	uint32_t number_of_pages_per_block;
+	uint32_t number_of_blocks_per_logical_unit;
+	uint8_t  number_of_logical_units;
+	uint8_t  number_of_address_cycles;
+	uint8_t  number_of_bits_per_cell;
+	uint16_t maximum_bad_blocks_per_logical_unit;
+	uint16_t block_endurance;
+	uint8_t  guaranteed_valid_begin_blocks;
+	uint16_t guaranteed_valid_begin_blocks_endurance;
+	uint8_t  number_of_programs_per_page;
+	uint8_t  partial_program_attributes;
+	uint8_t  number_of_bits_ecc_correctability;
+	uint8_t  number_of_interleaved_address_bits;
+	uint8_t  interleaved_operation_attributes;
+	uint8_t  reserved2[13];
+	uint8_t  io_pin_capacitance;
+	uint16_t timing_mode_support;
+	uint16_t program_cache_timing_mode_support;
+	uint16_t maximum_page_programming_time;
+	uint16_t maximum_block_erase_time;
+	uint16_t maximum_page_read_time;
+	uint16_t maximum_change_column_setup_time;
+	uint8_t  reserved3[23];
+	uint16_t vendor_specific_revision_number;
+	uint8_t  vendor_specific[88];
+	uint16_t integrity_crc;
+} __attribute__((__packed__));
+
+/*
+ * Get DMA memory of the requested size. Returns a pointer to free memory
+ * from the allocated pool, or NULL if no free memory is available.
+ */
+static void *msm_nand_get_dma_buffer(struct msm_nand_chip *chip, size_t size)
+{
+	uint32_t bitmask, free_bitmask, old_bitmask;
+	uint32_t need_mask, current_need_mask;
+	int free_index;
+
+	need_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
+			- 1;
+	bitmask = atomic_read(&chip->dma_buffer_busy);
+	free_bitmask = ~bitmask;
+	do {
+		free_index = __ffs(free_bitmask);
+		current_need_mask = need_mask << free_index;
+
+		if (size + free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ >=
+						 MSM_NAND_DMA_BUFFER_SIZE)
+			return NULL;
+
+		if ((bitmask & current_need_mask) == 0) {
+			old_bitmask =
+				atomic_cmpxchg(&chip->dma_buffer_busy,
+					       bitmask,
+					       bitmask | current_need_mask);
+			if (old_bitmask == bitmask)
+				return chip->dma_virt_addr +
+				free_index * MSM_NAND_DMA_BUFFER_SLOT_SZ;
+			free_bitmask = 0;/* force return */
+		}
+		/* current free range was too small, clear all free bits */
+		/* below the top busy bit within current_need_mask */
+		free_bitmask &=
+			~(~0U >> (32 - fls(bitmask & current_need_mask)));
+	} while (free_bitmask);
+
+	return NULL;
+}
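+
+/*
+ * Allocation walk-through with illustrative numbers (assuming 256-byte
+ * slots): a 600-byte request needs DIV_ROUND_UP(600, 256) = 3 slots, so
+ * need_mask = 0b111. If slots 0-1 are busy (dma_buffer_busy = 0b011), the
+ * first free bit is index 2 and current_need_mask = 0b111 << 2 = 0b11100;
+ * since this does not overlap the busy bits, atomic_cmpxchg() marks the
+ * three slots busy and the buffer at offset 2 * 256 bytes is returned.
+ */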
+
+/*
+ * Releases the used DMA memory back to the free pool and wakes up any thread
+ * waiting on the wait queue for free memory to become available.
+ */
+static void msm_nand_release_dma_buffer(struct msm_nand_chip *chip,
+					void *buffer, size_t size)
+{
+	int index;
+	uint32_t used_mask;
+
+	used_mask = (1UL << DIV_ROUND_UP(size, MSM_NAND_DMA_BUFFER_SLOT_SZ))
+			- 1;
+	index = ((uint8_t *)buffer - chip->dma_virt_addr) /
+		MSM_NAND_DMA_BUFFER_SLOT_SZ;
+	atomic_sub(used_mask << index, &chip->dma_buffer_busy);
+
+	wake_up(&chip->dma_wait_queue);
+}
+
+/*
+ * Calculates the page address of the buffer passed in and the offset of the
+ * buffer within that page, then maps it for DMA by calling dma_map_page().
+ */
+static dma_addr_t msm_nand_dma_map(struct device *dev, void *addr, size_t size,
+					 enum dma_data_direction dir)
+{
+	struct page *page;
+	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+	if (virt_addr_valid(addr))
+		page = virt_to_page(addr);
+	else {
+		if (WARN_ON(size + offset > PAGE_SIZE))
+			return ~0;
+		page = vmalloc_to_page(addr);
+	}
+	return dma_map_page(dev, page, offset, size, dir);
+}
+
+/*
+ * Wrapper function to prepare an SPS command element with the data passed to
+ * this function.
+ *
+ * Since every command element must have the SPS_IOVEC_FLAG_CMD flag set,
+ * this function sets it by default, so the caller need not pass it
+ * explicitly. Any other flags must be passed as needed; if a command element
+ * needs no other flag, pass 0 for flags.
+ */
+static inline void msm_nand_prep_ce(struct msm_nand_sps_cmd *sps_cmd,
+				uint32_t addr, uint32_t command,
+				uint32_t data, uint32_t flags)
+{
+	struct sps_command_element *cmd = &sps_cmd->ce;
+
+	cmd->addr = addr;
+	cmd->command = (command & WRITE) ? (uint32_t) SPS_WRITE_COMMAND :
+			(uint32_t) SPS_READ_COMMAND;
+	cmd->data = data;
+	cmd->mask = 0xFFFFFFFF;
+	sps_cmd->flags = SPS_IOVEC_FLAG_CMD | flags;
+}
+
+/*
+ * Read a single NANDc register at the address given by addr. The return
+ * value indicates whether the read was successful. The register value read
+ * is stored in val.
+ */
+static int msm_nand_flash_rd_reg(struct msm_nand_info *info, uint32_t addr,
+				uint32_t *val)
+{
+	int ret = 0;
+	struct msm_nand_sps_cmd *cmd;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	struct {
+		struct msm_nand_sps_cmd cmd;
+		uint32_t data;
+	} *dma_buffer;
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+		    chip, sizeof(*dma_buffer))));
+	cmd = &dma_buffer->cmd;
+	msm_nand_prep_ce(cmd, addr, READ, msm_virt_to_dma(chip,
+			&dma_buffer->data), SPS_IOVEC_FLAG_INT);
+
+	ret = sps_transfer_one(info->sps.cmd_pipe.handle,
+			msm_virt_to_dma(chip, &cmd->ce),
+			sizeof(struct sps_command_element), NULL, cmd->flags);
+	if (ret) {
+		pr_err("failed to submit command %x ret %d\n", addr, ret);
+		goto out;
+	}
+	wait_for_completion_io(&info->sps.cmd_pipe.completion);
+	*val = dma_buffer->data;
+out:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	return ret;
+}
+
+/*
+ * Read the flash ID from the NAND flash device. A return value < 0
+ * indicates failure. When successful, the flash ID is stored in the parameter
+ * read_id.
+ */
+static int msm_nand_flash_read_id(struct msm_nand_info *info,
+		bool read_onfi_signature,
+		uint32_t *read_id)
+{
+	int err = 0, i;
+	struct msm_nand_sps_cmd *cmd;
+	struct sps_iovec *iovec;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	uint32_t total_cnt = 4;
+	/*
+	 * The following 4 commands are required to read the ID -
+	 * write commands - addr0, flash, exec
+	 * read commands - read_id
+	 */
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[total_cnt];
+		struct msm_nand_sps_cmd cmd[total_cnt];
+		uint32_t data[total_cnt];
+	} *dma_buffer;
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+	if (read_onfi_signature)
+		dma_buffer->data[0] = FLASH_READ_ONFI_SIGNATURE_ADDRESS;
+	else
+		dma_buffer->data[0] = FLASH_READ_DEVICE_ID_ADDRESS;
+
+	dma_buffer->data[1] = MSM_NAND_CMD_FETCH_ID;
+	dma_buffer->data[2] = 1;
+	dma_buffer->data[3] = 0xeeeeeeee;
+
+	cmd = dma_buffer->cmd;
+	msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
+			dma_buffer->data[0], SPS_IOVEC_FLAG_LOCK);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE,
+			dma_buffer->data[1], 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+			dma_buffer->data[2], SPS_IOVEC_FLAG_NWD);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_READ_ID(info), READ,
+		msm_virt_to_dma(chip, &dma_buffer->data[3]),
+		SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+	cmd++;
+
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+					&dma_buffer->cmd_iovec);
+	iovec = dma_buffer->xfer.iovec;
+
+	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+		iovec->addr =  msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+		iovec->size = sizeof(struct sps_command_element);
+		iovec->flags = dma_buffer->cmd[i].flags;
+		iovec++;
+	}
+
+	mutex_lock(&info->bam_lock);
+	err =  sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+	if (err) {
+		pr_err("Failed to submit commands %d\n", err);
+		mutex_unlock(&info->bam_lock);
+		goto out;
+	}
+	wait_for_completion_io(&info->sps.cmd_pipe.completion);
+	mutex_unlock(&info->bam_lock);
+
+	pr_debug("Read ID register value 0x%x\n", dma_buffer->data[3]);
+	if (!read_onfi_signature)
+		pr_debug("nandid: %x maker %02x device %02x\n",
+		       dma_buffer->data[3], dma_buffer->data[3] & 0xff,
+		       (dma_buffer->data[3] >> 8) & 0xff);
+	*read_id = dma_buffer->data[3];
+out:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	return err;
+}
+
+/*
+ * Contains data for common configuration registers that must be programmed
+ * for every NANDc operation.
+ */
+struct msm_nand_common_cfgs {
+	uint32_t cmd;
+	uint32_t addr0;
+	uint32_t addr1;
+	uint32_t cfg0;
+	uint32_t cfg1;
+};
+
+/*
+ * Function to prepare SPS command elements to write into NANDc configuration
+ * registers as per the data defined in struct msm_nand_common_cfgs. This is
+ * required for the following NANDc operations - erase, bad-block checking,
+ * and reading the ONFI parameter page.
+ */
+static void msm_nand_prep_cfg_cmd_desc(struct msm_nand_info *info,
+				struct msm_nand_common_cfgs data,
+				struct msm_nand_sps_cmd **curr_cmd)
+{
+	struct msm_nand_sps_cmd *cmd;
+
+	cmd = *curr_cmd;
+	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data.cmd,
+			SPS_IOVEC_FLAG_LOCK);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE, data.addr0, 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE, data.addr1, 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE, data.cfg0, 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE, data.cfg1, 0);
+	cmd++;
+	*curr_cmd = cmd;
+}
+
+/*
+ * Function to perform the CRC integrity check on an ONFI parameter page.
+ * Controller ECC is disabled for ONFI parameter page reads, so the CRC must
+ * be computed manually and checked against the value stored within the
+ * page.
+ */
+static uint16_t msm_nand_flash_onfi_crc_check(uint8_t *buffer, uint16_t count)
+{
+	int i;
+	uint16_t result;
+
+	for (i = 0; i < count; i++)
+		buffer[i] = bitrev8(buffer[i]);
+
+	result = bitrev16(crc16(bitrev16(0x4f4e), buffer, count));
+
+	for (i = 0; i < count; i++)
+		buffer[i] = bitrev8(buffer[i]);
+
+	return result;
+}
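+
+/*
+ * Note: ONFI defines the parameter-page CRC-16 as an MSB-first CRC with
+ * polynomial 0x8005 and initial value 0x4F4E, whereas the kernel's crc16()
+ * implements the reflected (LSB-first) variant. The bitrev8()/bitrev16()
+ * calls above translate between the two bit orders so crc16() can be
+ * reused; the final loop restores the buffer to its original bit order.
+ */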
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for reading the ONFI parameter page.
+ */
+struct msm_nand_flash_onfi_data {
+	struct msm_nand_common_cfgs cfg;
+	uint32_t exec;
+	uint32_t devcmd1_orig;
+	uint32_t devcmdvld_orig;
+	uint32_t devcmd1_mod;
+	uint32_t devcmdvld_mod;
+	uint32_t ecc_bch_cfg;
+};
+
+/*
+ * Function to identify whether the attached NAND flash device is compliant
+ * with the ONFI spec. If it is, the ONFI parameter page is read to obtain
+ * the device parameters.
+ */
+static int msm_nand_flash_onfi_probe(struct msm_nand_info *info)
+{
+	struct msm_nand_chip *chip = &info->nand_chip;
+	struct flash_identification *flash = &info->flash_dev;
+	uint32_t crc_chk_count = 0, page_address = 0;
+	int ret = 0, i;
+
+	/* SPS parameters */
+	struct msm_nand_sps_cmd *cmd, *curr_cmd;
+	struct sps_iovec *iovec;
+	uint32_t rdata;
+
+	/* ONFI Identifier/Parameter Page parameters */
+	uint8_t *onfi_param_info_buf = NULL;
+	dma_addr_t dma_addr_param_info = 0;
+	struct onfi_param_page *onfi_param_page_ptr;
+	struct msm_nand_flash_onfi_data data;
+	uint32_t onfi_signature;
+
+	/* SPS command/data descriptors */
+	uint32_t total_cnt = 13;
+	/*
+	 * The following 13 commands are required to get the ONFI parameters -
+	 * flash, addr0, addr1, cfg0, cfg1, dev0_ecc_cfg, cmd_vld, dev_cmd1,
+	 * read_loc_0, exec, flash_status (read cmd), dev_cmd1, cmd_vld.
+	 */
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[total_cnt];
+		struct msm_nand_sps_cmd cmd[total_cnt];
+		uint32_t flash_status;
+	} *dma_buffer;
+
+	wait_event(chip->dma_wait_queue, (onfi_param_info_buf =
+		msm_nand_get_dma_buffer(chip, ONFI_PARAM_INFO_LENGTH)));
+	dma_addr_param_info = msm_virt_to_dma(chip, onfi_param_info_buf);
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer
+				(chip, sizeof(*dma_buffer))));
+
+	ret = msm_nand_flash_read_id(info, 1, &onfi_signature);
+	if (ret < 0) {
+		pr_err("Failed to read ONFI signature\n");
+		goto free_dma;
+	}
+	if (onfi_signature != ONFI_PARAMETER_PAGE_SIGNATURE) {
+		pr_info("Found a non ONFI device\n");
+		ret = -EIO;
+		goto free_dma;
+	}
+
+	memset(&data, 0, sizeof(struct msm_nand_flash_onfi_data));
+	ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD1(info),
+				&data.devcmd1_orig);
+	if (ret < 0)
+		goto free_dma;
+	ret = msm_nand_flash_rd_reg(info, MSM_NAND_DEV_CMD_VLD(info),
+			&data.devcmdvld_orig);
+	if (ret < 0)
+		goto free_dma;
+
+	data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+	data.exec = 1;
+	data.cfg.addr0 = (page_address << 16) |
+				FLASH_READ_ONFI_PARAMETERS_ADDRESS;
+	data.cfg.addr1 = (page_address >> 16) & 0xFF;
+	data.cfg.cfg0 =	MSM_NAND_CFG0_RAW_ONFI_PARAM_INFO;
+	data.cfg.cfg1 = MSM_NAND_CFG1_RAW_ONFI_PARAM_INFO;
+	data.devcmd1_mod = (data.devcmd1_orig & 0xFFFFFF00) |
+				FLASH_READ_ONFI_PARAMETERS_COMMAND;
+	data.devcmdvld_mod = data.devcmdvld_orig & 0xFFFFFFFE;
+	data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+	dma_buffer->flash_status = 0xeeeeeeee;
+
+	curr_cmd = cmd = dma_buffer->cmd;
+	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+	cmd = curr_cmd;
+	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+			data.ecc_bch_cfg, 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
+			data.devcmdvld_mod, 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
+			data.devcmd1_mod, 0);
+	cmd++;
+
+	rdata = (0 << 0) | (ONFI_PARAM_INFO_LENGTH << 16) | (1 << 31);
+	msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+			rdata, 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+		data.exec, SPS_IOVEC_FLAG_NWD);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+		msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD1(info), WRITE,
+			data.devcmd1_orig, 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_DEV_CMD_VLD(info), WRITE,
+			data.devcmdvld_orig,
+			SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+	cmd++;
+
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+					&dma_buffer->cmd_iovec);
+	iovec = dma_buffer->xfer.iovec;
+
+	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+		iovec->addr =  msm_virt_to_dma(chip,
+				&dma_buffer->cmd[i].ce);
+		iovec->size = sizeof(struct sps_command_element);
+		iovec->flags = dma_buffer->cmd[i].flags;
+		iovec++;
+	}
+	mutex_lock(&info->bam_lock);
+	/* Submit data descriptor */
+	ret = sps_transfer_one(info->sps.data_prod.handle, dma_addr_param_info,
+			ONFI_PARAM_INFO_LENGTH, NULL, SPS_IOVEC_FLAG_INT);
+	if (ret) {
+		pr_err("Failed to submit data descriptors %d\n", ret);
+		mutex_unlock(&info->bam_lock);
+		goto free_dma;
+	}
+	/* Submit command descriptors */
+	ret =  sps_transfer(info->sps.cmd_pipe.handle,
+			&dma_buffer->xfer);
+	if (ret) {
+		pr_err("Failed to submit commands %d\n", ret);
+		mutex_unlock(&info->bam_lock);
+		goto free_dma;
+	}
+	wait_for_completion_io(&info->sps.cmd_pipe.completion);
+	wait_for_completion_io(&info->sps.data_prod.completion);
+	mutex_unlock(&info->bam_lock);
+
+	/* Check for flash status errors */
+	if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+		pr_err("MPU/OP err (0x%x) is set\n", dma_buffer->flash_status);
+		ret = -EIO;
+		goto free_dma;
+	}
+
+	for (crc_chk_count = 0; crc_chk_count < ONFI_PARAM_INFO_LENGTH
+			/ ONFI_PARAM_PAGE_LENGTH; crc_chk_count++) {
+		onfi_param_page_ptr =
+			(struct onfi_param_page *)
+			(&(onfi_param_info_buf
+			[ONFI_PARAM_PAGE_LENGTH *
+			crc_chk_count]));
+		if (msm_nand_flash_onfi_crc_check(
+			(uint8_t *)onfi_param_page_ptr,
+			ONFI_PARAM_PAGE_LENGTH - 2) ==
+			onfi_param_page_ptr->integrity_crc) {
+			break;
+		}
+	}
+	if (crc_chk_count >= ONFI_PARAM_INFO_LENGTH
+			/ ONFI_PARAM_PAGE_LENGTH) {
+		pr_err("CRC Check failed on param page\n");
+		ret = -EIO;
+		goto free_dma;
+	}
+	ret = msm_nand_flash_read_id(info, 0, &flash->flash_id);
+	if (ret < 0) {
+		pr_err("Failed to read flash ID\n");
+		goto free_dma;
+	}
+	flash->widebus  = onfi_param_page_ptr->features_supported & 0x01;
+	flash->pagesize = onfi_param_page_ptr->number_of_data_bytes_per_page;
+	flash->blksize  = onfi_param_page_ptr->number_of_pages_per_block *
+					flash->pagesize;
+	flash->oobsize  = onfi_param_page_ptr->number_of_spare_bytes_per_page;
+	flash->density  = onfi_param_page_ptr->number_of_blocks_per_logical_unit
+					* flash->blksize;
+	flash->ecc_correctability = onfi_param_page_ptr->
+					number_of_bits_ecc_correctability;
+
+	pr_info("Found an ONFI compliant device %s\n",
+			onfi_param_page_ptr->device_model);
+	/*
+	 * Temporary hack for the MT29F4G08ABC device.
+	 * Since the device does not properly adhere to
+	 * the ONFI specification, it reports itself as a
+	 * 16-bit device even though it is an 8-bit device.
+	 */
+	if (!strncmp(onfi_param_page_ptr->device_model, "MT29F4G08ABC", 12))
+		flash->widebus  = 0;
+free_dma:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	msm_nand_release_dma_buffer(chip, onfi_param_info_buf,
+			ONFI_PARAM_INFO_LENGTH);
+	return ret;
+}
+
+/*
+ * Structure that contains read/write parameters required for reading/writing
+ * from/to a page.
+ */
+struct msm_nand_rw_params {
+	uint32_t page;
+	uint32_t page_count;
+	uint32_t sectordatasize;
+	uint32_t sectoroobsize;
+	uint32_t cwperpage;
+	uint32_t oob_len_cmd;
+	uint32_t oob_len_data;
+	uint32_t start_sector;
+	uint32_t oob_col;
+	dma_addr_t data_dma_addr;
+	dma_addr_t oob_dma_addr;
+	dma_addr_t data_dma_addr_curr;
+	dma_addr_t oob_dma_addr_curr;
+	bool read;
+};
+
+/*
+ * Structure that contains NANDc register data required for reading/writing
+ * from/to a page.
+ */
+struct msm_nand_rw_reg_data {
+	uint32_t cmd;
+	uint32_t addr0;
+	uint32_t addr1;
+	uint32_t cfg0;
+	uint32_t cfg1;
+	uint32_t ecc_bch_cfg;
+	uint32_t exec;
+	uint32_t ecc_cfg;
+	uint32_t clrfstatus;
+	uint32_t clrrstatus;
+};
+
+/*
+ * Function that validates page read/write MTD parameters received from upper
+ * layers such as MTD/YAFFS2 and returns an error for any operation not
+ * supported by the driver. On success, it also maps the data and OOB buffers
+ * received for DMA.
+ */
+static int msm_nand_validate_mtd_params(struct mtd_info *mtd, bool read,
+					loff_t offset,
+					struct mtd_oob_ops *ops,
+					struct msm_nand_rw_params *args)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	int err = 0;
+
+	pr_debug("========================================================\n");
+	pr_debug("offset 0x%llx mode %d\ndatbuf 0x%p datlen 0x%x\n",
+			offset, ops->mode, ops->datbuf, ops->len);
+	pr_debug("oobbuf 0x%p ooblen 0x%x\n", ops->oobbuf, ops->ooblen);
+
+	if (ops->mode == MTD_OPS_PLACE_OOB) {
+		pr_err("MTD_OPS_PLACE_OOB is not supported\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (mtd->writesize == PAGE_SIZE_2K)
+		args->page = offset >> 11;
+
+	if (mtd->writesize == PAGE_SIZE_4K)
+		args->page = offset >> 12;
+
+	args->oob_len_cmd = ops->ooblen;
+	args->oob_len_data = ops->ooblen;
+	args->cwperpage = (mtd->writesize >> 9);
+	args->read = (read ? true : false);
+
+	if (offset & (mtd->writesize - 1)) {
+		pr_err("unsupported offset 0x%llx\n", offset);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (!read && !ops->datbuf) {
+		pr_err("No data buffer provided for write!!\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (ops->mode == MTD_OPS_RAW) {
+		if (!ops->datbuf) {
+			pr_err("No data buffer provided for RAW mode\n");
+			err =  -EINVAL;
+			goto out;
+		} else if ((ops->len % (mtd->writesize +
+				mtd->oobsize)) != 0) {
+			pr_err("unsupported data len %d for RAW mode\n",
+				ops->len);
+			err = -EINVAL;
+			goto out;
+		}
+		args->page_count = ops->len / (mtd->writesize + mtd->oobsize);
+
+	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
+		if (ops->datbuf && (ops->len % mtd->writesize) != 0) {
+			/* when ops->datbuf is NULL, ops->len can be ooblen */
+			pr_err("unsupported data len %d for AUTO mode\n",
+					ops->len);
+			err = -EINVAL;
+			goto out;
+		}
+		if (read && ops->oobbuf && !ops->datbuf) {
+			args->start_sector = args->cwperpage - 1;
+			args->page_count = ops->ooblen / mtd->oobavail;
+			if ((args->page_count == 0) && (ops->ooblen))
+				args->page_count = 1;
+		} else if (ops->datbuf) {
+			args->page_count = ops->len / mtd->writesize;
+		}
+	}
+
+	if (ops->datbuf) {
+		args->data_dma_addr_curr = args->data_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->datbuf, ops->len,
+				      (read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
+		if (dma_mapping_error(chip->dev, args->data_dma_addr)) {
+			pr_err("dma mapping failed for 0x%p\n", ops->datbuf);
+			err = -EIO;
+			goto out;
+		}
+	}
+	if (ops->oobbuf) {
+		if (read)
+			memset(ops->oobbuf, 0xFF, ops->ooblen);
+		args->oob_dma_addr_curr = args->oob_dma_addr =
+			msm_nand_dma_map(chip->dev, ops->oobbuf, ops->ooblen,
+				(read ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE));
+		if (dma_mapping_error(chip->dev, args->oob_dma_addr)) {
+			pr_err("dma mapping failed for 0x%p\n", ops->oobbuf);
+			err = -EIO;
+			goto dma_map_oobbuf_failed;
+		}
+	}
+	goto out;
+dma_map_oobbuf_failed:
+	if (ops->datbuf)
+		dma_unmap_page(chip->dev, args->data_dma_addr, ops->len,
+				(read ? DMA_FROM_DEVICE : DMA_TO_DEVICE));
+out:
+	return err;
+}
+
+/*
+ * Function that updates NANDc register data (struct msm_nand_rw_reg_data)
+ * required for page read/write.
+ */
+static void msm_nand_update_rw_reg_data(struct msm_nand_chip *chip,
+					struct mtd_oob_ops *ops,
+					struct msm_nand_rw_params *args,
+					struct msm_nand_rw_reg_data *data)
+{
+	if (args->read) {
+		if (ops->mode != MTD_OPS_RAW) {
+			data->cmd = MSM_NAND_CMD_PAGE_READ_ECC;
+			data->cfg0 =
+			(chip->cfg0 & ~(7U << CW_PER_PAGE)) |
+			(((args->cwperpage-1) - args->start_sector)
+			 << CW_PER_PAGE);
+			data->cfg1 = chip->cfg1;
+			data->ecc_bch_cfg = chip->ecc_bch_cfg;
+		} else {
+			data->cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+			data->cfg0 = chip->cfg0_raw;
+			data->cfg1 = chip->cfg1_raw;
+			data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+		}
+
+	} else {
+		if (ops->mode != MTD_OPS_RAW) {
+			data->cfg0 = chip->cfg0;
+			data->cfg1 = chip->cfg1;
+			data->ecc_bch_cfg = chip->ecc_bch_cfg;
+		} else {
+			data->cfg0 = chip->cfg0_raw;
+			data->cfg1 = chip->cfg1_raw;
+			data->ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+		}
+		data->cmd = MSM_NAND_CMD_PRG_PAGE;
+		data->clrfstatus = MSM_NAND_RESET_FLASH_STS;
+		data->clrrstatus = MSM_NAND_RESET_READ_STS;
+	}
+	data->exec = 1;
+	data->ecc_cfg = chip->ecc_buf_cfg;
+}
+
+/*
+ * Function to prepare series of SPS command descriptors required for a page
+ * read/write operation.
+ */
+static void msm_nand_prep_rw_cmd_desc(struct mtd_oob_ops *ops,
+				struct msm_nand_rw_params *args,
+				struct msm_nand_rw_reg_data *data,
+				struct msm_nand_info *info,
+				uint32_t curr_cw,
+				struct msm_nand_sps_cmd **curr_cmd)
+{
+	struct msm_nand_chip *chip = &info->nand_chip;
+	struct msm_nand_sps_cmd *cmd;
+	uint32_t rdata;
+	/* read_location register parameters */
+	uint32_t offset, size, last_read;
+
+	cmd = *curr_cmd;
+	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_CMD(info), WRITE, data->cmd,
+			((curr_cw == args->start_sector) ?
+			 SPS_IOVEC_FLAG_LOCK : 0));
+	cmd++;
+
+	if (curr_cw == args->start_sector) {
+		msm_nand_prep_ce(cmd, MSM_NAND_ADDR0(info), WRITE,
+				data->addr0, 0);
+		cmd++;
+
+		msm_nand_prep_ce(cmd, MSM_NAND_ADDR1(info), WRITE,
+				data->addr1, 0);
+		cmd++;
+
+		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG0(info), WRITE,
+				data->cfg0, 0);
+		cmd++;
+
+		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_CFG1(info), WRITE,
+				data->cfg1, 0);
+		cmd++;
+
+		msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+				data->ecc_bch_cfg, 0);
+		cmd++;
+
+		msm_nand_prep_ce(cmd, MSM_NAND_EBI2_ECC_BUF_CFG(info),
+				WRITE, data->ecc_cfg, 0);
+		cmd++;
+	}
+
+	if (!args->read)
+		goto sub_exec_cmd;
+
+	if (ops->mode == MTD_OPS_RAW) {
+		rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
+		msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+				rdata, 0);
+		cmd++;
+	}
+	if (ops->mode == MTD_OPS_AUTO_OOB && ops->datbuf) {
+		offset = 0;
+		size = (curr_cw < (args->cwperpage - 1)) ? 516 :
+			(512 - ((args->cwperpage - 1) << 2));
+		last_read = (curr_cw < (args->cwperpage - 1)) ? 1 :
+			(ops->oobbuf ? 0 : 1);
+		rdata = (offset << 0) | (size << 16) | (last_read << 31);
+		msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE,
+				rdata, 0);
+		cmd++;
+	}
+	if (ops->mode == MTD_OPS_AUTO_OOB && ops->oobbuf
+			&& (curr_cw == (args->cwperpage - 1))) {
+		offset = 512 - ((args->cwperpage - 1) << 2);
+		size = (args->cwperpage) << 2;
+		if (size > args->oob_len_cmd)
+			size = args->oob_len_cmd;
+		args->oob_len_cmd -= size;
+		last_read = 1;
+		rdata = (offset << 0) | (size << 16) | (last_read << 31);
+		if (ops->datbuf) {
+			msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_1(info),
+					WRITE, rdata, 0);
+		} else {
+			msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info),
+					WRITE, rdata, 0);
+		}
+		cmd++;
+	}
+sub_exec_cmd:
+	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data->exec,
+			SPS_IOVEC_FLAG_NWD);
+	cmd++;
+	*curr_cmd = cmd;
+}
+
+/*
+ * Function to prepare and submit SPS data descriptors required for a page
+ * read/write operation.
+ */
+static int msm_nand_submit_rw_data_desc(struct mtd_oob_ops *ops,
+				struct msm_nand_rw_params *args,
+				struct msm_nand_info *info,
+				uint32_t curr_cw)
+{
+	struct msm_nand_chip *chip = &info->nand_chip;
+	struct sps_pipe *data_pipe_handle;
+	uint32_t sectordatasize, sectoroobsize;
+	uint32_t sps_flags = 0;
+	int err = 0;
+
+	if (args->read)
+		data_pipe_handle = info->sps.data_prod.handle;
+	else
+		data_pipe_handle = info->sps.data_cons.handle;
+
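+	/*
+	 * Descriptors on the consumer (write) pipe are normally flagged
+	 * EOT; when an OOB descriptor follows the last codeword, EOT is
+	 * deferred to it. The final descriptor of the page also carries
+	 * INT so that the data pipe completion fires.
+	 */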
+	if (ops->mode == MTD_OPS_RAW) {
+		sectordatasize = chip->cw_size;
+		if (!args->read)
+			sps_flags = SPS_IOVEC_FLAG_EOT;
+		if (curr_cw == (args->cwperpage - 1))
+			sps_flags |= SPS_IOVEC_FLAG_INT;
+
+		err = sps_transfer_one(data_pipe_handle,
+				args->data_dma_addr_curr,
+				sectordatasize, NULL,
+				sps_flags);
+		if (err)
+			goto out;
+		args->data_dma_addr_curr += sectordatasize;
+
+	} else if (ops->mode == MTD_OPS_AUTO_OOB) {
+		if (ops->datbuf) {
+			sectordatasize = (curr_cw < (args->cwperpage - 1))
+			? 516 : (512 - ((args->cwperpage - 1) << 2));
+
+			if (!args->read) {
+				sps_flags = SPS_IOVEC_FLAG_EOT;
+				if (curr_cw == (args->cwperpage - 1) &&
+						ops->oobbuf)
+					sps_flags = 0;
+			}
+			if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
+				sps_flags |= SPS_IOVEC_FLAG_INT;
+
+			err = sps_transfer_one(data_pipe_handle,
+					args->data_dma_addr_curr,
+					sectordatasize, NULL,
+					sps_flags);
+			if (err)
+				goto out;
+			args->data_dma_addr_curr += sectordatasize;
+		}
+
+		if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
+			sectoroobsize = args->cwperpage << 2;
+			if (sectoroobsize > args->oob_len_data)
+				sectoroobsize = args->oob_len_data;
+
+			if (!args->read)
+				sps_flags |= SPS_IOVEC_FLAG_EOT;
+			sps_flags |= SPS_IOVEC_FLAG_INT;
+			err = sps_transfer_one(data_pipe_handle,
+					args->oob_dma_addr_curr,
+					sectoroobsize, NULL,
+					sps_flags);
+			if (err)
+				goto out;
+			args->oob_dma_addr_curr += sectoroobsize;
+			args->oob_len_data -= sectoroobsize;
+		}
+	}
+out:
+	return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to read a
+ * page with main and/or spare data.
+ */
+static int msm_nand_read_oob(struct mtd_info *mtd, loff_t from,
+			     struct mtd_oob_ops *ops)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	uint32_t cwperpage = (mtd->writesize >> 9);
+	int err, pageerr = 0, rawerr = 0;
+	uint32_t n = 0, pages_read = 0;
+	uint32_t ecc_errors = 0, total_ecc_errors = 0;
+	struct msm_nand_rw_params rw_params;
+	struct msm_nand_rw_reg_data data;
+	struct msm_nand_sps_cmd *cmd, *curr_cmd;
+	struct sps_iovec *iovec;
+	/*
+	 * These 6 commands are sent only once, for the first codeword
+	 * (CW): addr0, addr1, dev0_cfg0, dev0_cfg1, dev0_ecc_cfg,
+	 * ebi2_ecc_buf_cfg. These 6 commands are sent for every CW:
+	 * flash, read_location_0, read_location_1, exec, flash_status
+	 * and buffer_status.
+	 */
+	uint32_t total_cnt = (6 * cwperpage) + 6;
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[total_cnt];
+		struct msm_nand_sps_cmd cmd[total_cnt];
+		struct {
+			uint32_t flash_status;
+			uint32_t buffer_status;
+		} result[cwperpage];
+	} *dma_buffer;
+
+	memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
+	err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params);
+	if (err)
+		goto validate_mtd_params_failed;
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+			    chip, sizeof(*dma_buffer))));
+
+	rw_params.oob_col = rw_params.start_sector * chip->cw_size;
+	if (chip->cfg1 & (1 << WIDE_FLASH))
+		rw_params.oob_col >>= 1;
+
+	memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+	msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
+
+	while (rw_params.page_count-- > 0) {
+		data.addr0 = (rw_params.page << 16) | rw_params.oob_col;
+		data.addr1 = (rw_params.page >> 16) & 0xff;
+		cmd = dma_buffer->cmd;
+		for (n = rw_params.start_sector; n < cwperpage; n++) {
+			dma_buffer->result[n].flash_status = 0xeeeeeeee;
+			dma_buffer->result[n].buffer_status = 0xeeeeeeee;
+
+			curr_cmd = cmd;
+			msm_nand_prep_rw_cmd_desc(ops, &rw_params,
+					&data, info, n, &curr_cmd);
+
+			cmd = curr_cmd;
+			msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
+				READ, msm_virt_to_dma(chip,
+				&dma_buffer->result[n].flash_status), 0);
+			cmd++;
+
+			msm_nand_prep_ce(cmd, MSM_NAND_BUFFER_STATUS(info),
+				READ, msm_virt_to_dma(chip,
+				&dma_buffer->result[n].buffer_status),
+				((n == (cwperpage - 1)) ?
+				(SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT) :
+				0));
+			cmd++;
+		}
+
+		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+		dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+		dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+		dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+						&dma_buffer->cmd_iovec);
+		iovec = dma_buffer->xfer.iovec;
+
+		for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
+			iovec->addr =  msm_virt_to_dma(chip,
+					&dma_buffer->cmd[n].ce);
+			iovec->size = sizeof(struct sps_command_element);
+			iovec->flags = dma_buffer->cmd[n].flags;
+			iovec++;
+		}
+		mutex_lock(&info->bam_lock);
+		/* Submit data descriptors */
+		for (n = rw_params.start_sector; n < cwperpage; n++) {
+			err = msm_nand_submit_rw_data_desc(ops,
+						&rw_params, info, n);
+			if (err) {
+				pr_err("Failed to submit data descs %d\n", err);
+				mutex_unlock(&info->bam_lock);
+				goto free_dma;
+			}
+		}
+		/* Submit command descriptors */
+		err =  sps_transfer(info->sps.cmd_pipe.handle,
+				&dma_buffer->xfer);
+		if (err) {
+			pr_err("Failed to submit commands %d\n", err);
+			mutex_unlock(&info->bam_lock);
+			goto free_dma;
+		}
+		wait_for_completion_io(&info->sps.cmd_pipe.completion);
+		wait_for_completion_io(&info->sps.data_prod.completion);
+		mutex_unlock(&info->bam_lock);
+		/* Check for flash status errors */
+		pageerr = rawerr = 0;
+		for (n = rw_params.start_sector; n < cwperpage; n++) {
+			if (dma_buffer->result[n].flash_status & (FS_OP_ERR |
+					FS_MPU_ERR)) {
+				rawerr = -EIO;
+				break;
+			}
+		}
+		/* If the raw read failed, check whether the page is simply erased */
+		if (rawerr && ops->datbuf && ops->mode != MTD_OPS_RAW) {
+			uint8_t *datbuf = ops->datbuf +
+				pages_read * mtd->writesize;
+
+			dma_sync_single_for_cpu(chip->dev,
+			rw_params.data_dma_addr_curr - mtd->writesize,
+			mtd->writesize, DMA_BIDIRECTIONAL);
+
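+			/*
+			 * An erased page should read back as all 0xff, but
+			 * the bytes at offsets 3 and 175 of each 516-byte
+			 * chunk may read back as 0x54, so they are patched
+			 * to 0xff below before the all-0xff check.
+			 */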
+			for (n = 0; n < mtd->writesize; n++) {
+				/* TODO: check offset for 4bit BCHECC */
+				if ((n % 516 == 3 || n % 516 == 175)
+						&& datbuf[n] == 0x54)
+					datbuf[n] = 0xff;
+				if (datbuf[n] != 0xff) {
+					pageerr = rawerr;
+					break;
+				}
+			}
+
+			dma_sync_single_for_device(chip->dev,
+			rw_params.data_dma_addr_curr - mtd->writesize,
+			mtd->writesize, DMA_BIDIRECTIONAL);
+		}
+		if (rawerr && ops->oobbuf) {
+			dma_sync_single_for_cpu(chip->dev,
+			rw_params.oob_dma_addr_curr - (ops->ooblen -
+			rw_params.oob_len_data),
+			ops->ooblen - rw_params.oob_len_data,
+			DMA_BIDIRECTIONAL);
+
+			for (n = 0; n < ops->ooblen; n++) {
+				if (ops->oobbuf[n] != 0xff) {
+					pageerr = rawerr;
+					break;
+				}
+			}
+
+			dma_sync_single_for_device(chip->dev,
+			rw_params.oob_dma_addr_curr - (ops->ooblen -
+			rw_params.oob_len_data),
+			ops->ooblen - rw_params.oob_len_data,
+			DMA_BIDIRECTIONAL);
+		}
+		/* check for uncorrectable errors */
+		if (pageerr) {
+			for (n = rw_params.start_sector; n < cwperpage; n++) {
+				if (dma_buffer->result[n].buffer_status &
+					BS_UNCORRECTABLE_BIT) {
+					mtd->ecc_stats.failed++;
+					pageerr = -EBADMSG;
+					break;
+				}
+			}
+		}
+		/* check for correctable errors */
+		if (!rawerr) {
+			for (n = rw_params.start_sector; n < cwperpage; n++) {
+				ecc_errors =
+				    dma_buffer->result[n].buffer_status
+				    & BS_CORRECTABLE_ERR_MSK;
+				if (ecc_errors) {
+					total_ecc_errors += ecc_errors;
+					mtd->ecc_stats.corrected += ecc_errors;
+					/*
+					 * On Micron devices, correctable
+					 * errors of up to 3 bits are
+					 * observed to be very common.
+					 */
+					if (ecc_errors > 3)
+						pageerr = -EUCLEAN;
+				}
+			}
+		}
+		if (pageerr && (pageerr != -EUCLEAN || err == 0))
+			err = pageerr;
+
+		if (rawerr && !pageerr) {
+			pr_debug("%llx %x %x empty page\n",
+			       (loff_t)rw_params.page * mtd->writesize,
+			       ops->len, ops->ooblen);
+		} else {
+			for (n = rw_params.start_sector; n < cwperpage; n++)
+				pr_debug("cw %d: flash_sts %x buffr_sts %x\n",
+				n, dma_buffer->result[n].flash_status,
+				dma_buffer->result[n].buffer_status);
+		}
+		if (err && err != -EUCLEAN && err != -EBADMSG)
+			goto free_dma;
+		pages_read++;
+		rw_params.page++;
+	}
+free_dma:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	if (ops->oobbuf)
+		dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
+				 ops->ooblen, DMA_FROM_DEVICE);
+	if (ops->datbuf)
+		dma_unmap_page(chip->dev, rw_params.data_dma_addr,
+				 ops->len, DMA_BIDIRECTIONAL);
+validate_mtd_params_failed:
+	if (ops->mode != MTD_OPS_RAW)
+		ops->retlen = mtd->writesize * pages_read;
+	else
+		ops->retlen = (mtd->writesize +  mtd->oobsize) * pages_read;
+	ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
+	if (err)
+		pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n",
+		       from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
+		       total_ecc_errors);
+	pr_debug("ret %d, retlen %d oobretlen %d\n",
+			err, ops->retlen, ops->oobretlen);
+
+	pr_debug("========================================================\n");
+	return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to read a
+ * page with only main data.
+ */
+static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+	      size_t *retlen, u_char *buf)
+{
+	int ret;
+	struct mtd_oob_ops ops;
+
+	ops.mode = MTD_OPS_PLACE_OOB;
+	ops.len = len;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.datbuf = buf;
+	ops.oobbuf = NULL;
+	ret =  msm_nand_read_oob(mtd, from, &ops);
+	*retlen = ops.retlen;
+	return ret;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to write a
+ * page with both main and spare data.
+ */
+static int msm_nand_write_oob(struct mtd_info *mtd, loff_t to,
+				struct mtd_oob_ops *ops)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	uint32_t cwperpage = (mtd->writesize >> 9);
+	uint32_t n, flash_sts, pages_written = 0;
+	int err = 0;
+	struct msm_nand_rw_params rw_params;
+	struct msm_nand_rw_reg_data data;
+	struct msm_nand_sps_cmd *cmd, *curr_cmd;
+	struct sps_iovec *iovec;
+	/*
+	 * The following 7 commands will be sent only once:
+	 * for the first codeword (CW) - addr0, addr1, dev0_cfg0, dev0_cfg1,
+	 * dev0_ecc_cfg, ebi2_ecc_buf_cfg;
+	 * for the last codeword (CW) - read_status (write).
+	 *
+	 * The following 4 commands will be sent for every CW:
+	 * flash, exec, flash_status (read), flash_status (write).
+	 */
+	uint32_t total_cnt = (4 * cwperpage) + 7;
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[total_cnt];
+		struct msm_nand_sps_cmd cmd[total_cnt];
+		struct {
+			uint32_t flash_status[cwperpage];
+		} data;
+	} *dma_buffer;
+
+	memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
+	err = msm_nand_validate_mtd_params(mtd, false, to, ops, &rw_params);
+	if (err)
+		goto validate_mtd_params_failed;
+
+	wait_event(chip->dma_wait_queue, (dma_buffer =
+			msm_nand_get_dma_buffer(chip, sizeof(*dma_buffer))));
+
+	memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+	msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
+
+	while (rw_params.page_count-- > 0) {
+		data.addr0 = (rw_params.page << 16);
+		data.addr1 = (rw_params.page >> 16) & 0xff;
+		cmd = dma_buffer->cmd;
+
+		for (n = 0; n < cwperpage ; n++) {
+			dma_buffer->data.flash_status[n] = 0xeeeeeeee;
+
+			curr_cmd = cmd;
+			msm_nand_prep_rw_cmd_desc(ops, &rw_params,
+					&data, info, n, &curr_cmd);
+
+			cmd = curr_cmd;
+			msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
+				READ, msm_virt_to_dma(chip,
+				&dma_buffer->data.flash_status[n]), 0);
+			cmd++;
+
+			msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info),
+				WRITE, data.clrfstatus, 0);
+			cmd++;
+
+			if (n == (cwperpage - 1)) {
+				msm_nand_prep_ce(cmd,
+					MSM_NAND_READ_STATUS(info), WRITE,
+					data.clrrstatus, SPS_IOVEC_FLAG_UNLOCK
+					| SPS_IOVEC_FLAG_INT);
+				cmd++;
+			}
+		}
+
+		BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+		dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+		dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+		dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+						&dma_buffer->cmd_iovec);
+		iovec = dma_buffer->xfer.iovec;
+
+		for (n = 0; n < dma_buffer->xfer.iovec_count; n++) {
+			iovec->addr =  msm_virt_to_dma(chip,
+					&dma_buffer->cmd[n].ce);
+			iovec->size = sizeof(struct sps_command_element);
+			iovec->flags = dma_buffer->cmd[n].flags;
+			iovec++;
+		}
+		mutex_lock(&info->bam_lock);
+		/* Submit data descriptors */
+		for (n = 0; n < cwperpage; n++) {
+			err = msm_nand_submit_rw_data_desc(ops,
+						&rw_params, info, n);
+			if (err) {
+				pr_err("Failed to submit data descs %d\n", err);
+				mutex_unlock(&info->bam_lock);
+				goto free_dma;
+			}
+		}
+		/* Submit command descriptors */
+		err =  sps_transfer(info->sps.cmd_pipe.handle,
+				&dma_buffer->xfer);
+		if (err) {
+			pr_err("Failed to submit commands %d\n", err);
+			mutex_unlock(&info->bam_lock);
+			goto free_dma;
+		}
+		wait_for_completion_io(&info->sps.cmd_pipe.completion);
+		wait_for_completion_io(&info->sps.data_cons.completion);
+		mutex_unlock(&info->bam_lock);
+
+		for (n = 0; n < cwperpage; n++)
+			pr_debug("write pg %d: flash_status[%d] = %x\n",
+				rw_params.page, n,
+				dma_buffer->data.flash_status[n]);
+
+		/*  Check for flash status errors */
+		for (n = 0; n < cwperpage; n++) {
+			flash_sts = dma_buffer->data.flash_status[n];
+			if (flash_sts & (FS_OP_ERR | FS_MPU_ERR)) {
+				pr_err("MPU/OP err (0x%x) set\n", flash_sts);
+				err = -EIO;
+				goto free_dma;
+			}
+			if (n == (cwperpage - 1)) {
+				if (!(flash_sts & FS_DEVICE_WP) ||
+					(flash_sts & FS_DEVICE_STS_ERR)) {
+					pr_err("Dev sts err 0x%x\n", flash_sts);
+					err = -EIO;
+					goto free_dma;
+				}
+			}
+		}
+		pages_written++;
+		rw_params.page++;
+	}
+free_dma:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+	if (ops->oobbuf)
+		dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
+				 ops->ooblen, DMA_TO_DEVICE);
+	if (ops->datbuf)
+		dma_unmap_page(chip->dev, rw_params.data_dma_addr,
+				ops->len, DMA_TO_DEVICE);
+validate_mtd_params_failed:
+	if (ops->mode != MTD_OPS_RAW)
+		ops->retlen = mtd->writesize * pages_written;
+	else
+		ops->retlen = (mtd->writesize + mtd->oobsize) * pages_written;
+
+	ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
+	if (err)
+		pr_err("to %llx datalen %x ooblen %x failed with err %d\n",
+		       to, ops->len, ops->ooblen, err);
+	pr_debug("ret %d, retlen %d oobretlen %d\n",
+			err, ops->retlen, ops->oobretlen);
+
+	pr_debug("================================================\n");
+	return err;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to write a
+ * page with only main data.
+ */
+static int msm_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+			  size_t *retlen, const u_char *buf)
+{
+	int ret;
+	struct mtd_oob_ops ops;
+
+	ops.mode = MTD_OPS_PLACE_OOB;
+	ops.len = len;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.datbuf = (uint8_t *)buf;
+	ops.oobbuf = NULL;
+	ret =  msm_nand_write_oob(mtd, to, &ops);
+	*retlen = ops.retlen;
+	return ret;
+}
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for Erase operation.
+ */
+struct msm_nand_erase_reg_data {
+	struct msm_nand_common_cfgs cfg;
+	uint32_t exec;
+	uint32_t flash_status;
+	uint32_t clrfstatus;
+	uint32_t clrrstatus;
+};
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to erase a
+ * block within NAND device.
+ */
+static int msm_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	int i, err = 0;
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	uint32_t page = 0;
+	struct msm_nand_sps_cmd *cmd, *curr_cmd;
+	struct msm_nand_erase_reg_data data;
+	struct sps_iovec *iovec;
+	uint32_t total_cnt = 9;
+	/*
+	 * The following 9 commands are required to erase a block -
+	 * flash, addr0, addr1, cfg0, cfg1, exec, flash_status(read),
+	 * flash_status(write), read_status.
+	 */
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[total_cnt];
+		struct msm_nand_sps_cmd cmd[total_cnt];
+		uint32_t flash_status;
+	} *dma_buffer;
+
+	if (mtd->writesize == PAGE_SIZE_2K)
+		page = instr->addr >> 11;
+
+	if (mtd->writesize == PAGE_SIZE_4K)
+		page = instr->addr >> 12;
+
+	if (instr->addr & (mtd->erasesize - 1)) {
+		pr_err("unsupported erase address, 0x%llx\n", instr->addr);
+		err = -EINVAL;
+		goto out;
+	}
+	if (instr->len != mtd->erasesize) {
+		pr_err("unsupported erase len, %lld\n", instr->len);
+		err = -EINVAL;
+		goto out;
+	}
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+			    chip, sizeof(*dma_buffer))));
+	cmd = dma_buffer->cmd;
+
+	memset(&data, 0, sizeof(struct msm_nand_erase_reg_data));
+	data.cfg.cmd = MSM_NAND_CMD_BLOCK_ERASE;
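+	/* The block to erase is addressed by the number of its first page. */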
+	data.cfg.addr0 = page;
+	data.cfg.addr1 = 0;
+	data.cfg.cfg0 = chip->cfg0 & (~(7 << CW_PER_PAGE));
+	data.cfg.cfg1 = chip->cfg1;
+	data.exec = 1;
+	dma_buffer->flash_status = 0xeeeeeeee;
+	data.clrfstatus = MSM_NAND_RESET_FLASH_STS;
+	data.clrrstatus = MSM_NAND_RESET_READ_STS;
+
+	curr_cmd = cmd;
+	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+	cmd = curr_cmd;
+	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE, data.exec,
+			SPS_IOVEC_FLAG_NWD);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+		msm_virt_to_dma(chip, &dma_buffer->flash_status), 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), WRITE,
+			data.clrfstatus, 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_READ_STATUS(info), WRITE,
+			data.clrrstatus,
+			SPS_IOVEC_FLAG_UNLOCK | SPS_IOVEC_FLAG_INT);
+	cmd++;
+
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+					&dma_buffer->cmd_iovec);
+	iovec = dma_buffer->xfer.iovec;
+
+	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+		iovec->addr =  msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+		iovec->size = sizeof(struct sps_command_element);
+		iovec->flags = dma_buffer->cmd[i].flags;
+		iovec++;
+	}
+	mutex_lock(&info->bam_lock);
+	err =  sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+	if (err) {
+		pr_err("Failed to submit commands %d\n", err);
+		mutex_unlock(&info->bam_lock);
+		goto free_dma;
+	}
+	wait_for_completion_io(&info->sps.cmd_pipe.completion);
+	mutex_unlock(&info->bam_lock);
+
+	/*  Check for flash status errors */
+	if (dma_buffer->flash_status & (FS_OP_ERR |
+			FS_MPU_ERR | FS_DEVICE_STS_ERR)) {
+		pr_err("MPU/OP/DEV err (0x%x) set\n", dma_buffer->flash_status);
+		err = -EIO;
+	}
+	if (!(dma_buffer->flash_status & FS_DEVICE_WP)) {
+		pr_err("Device is write protected\n");
+		err = -EIO;
+	}
+	if (err) {
+		pr_err("Erase failed, 0x%llx\n", instr->addr);
+		instr->fail_addr = instr->addr;
+		instr->state = MTD_ERASE_FAILED;
+	} else {
+		instr->state = MTD_ERASE_DONE;
+		instr->fail_addr = 0xffffffff;
+		mtd_erase_callback(instr);
+	}
+free_dma:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+out:
+	return err;
+}
+
+/*
+ * Structure that contains NANDc register data for commands required
+ * for checking if a block is bad.
+ */
+struct msm_nand_blk_isbad_data {
+	struct msm_nand_common_cfgs cfg;
+	uint32_t ecc_bch_cfg;
+	uint32_t exec;
+	uint32_t read_offset;
+};
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to check if
+ * a block is bad. This is done by reading the first page within the block
+ * and checking whether the bad block byte location contains 0xFF. If it
+ * does not, the block is considered bad.
+ */
+static int msm_nand_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	int i, ret = 0, bad_block = 0;
+	uint8_t *buf;
+	uint32_t page = 0, rdata, cwperpage;
+	struct msm_nand_sps_cmd *cmd, *curr_cmd;
+	struct msm_nand_blk_isbad_data data;
+	struct sps_iovec *iovec;
+	uint32_t total_cnt = 9;
+	/*
+	 * The following 9 commands are required to check for a bad block -
+	 * flash, addr0, addr1, cfg0, cfg1, ecc_cfg, read_loc_0,
+	 * exec, flash_status(read).
+	 */
+	struct {
+		struct sps_transfer xfer;
+		struct sps_iovec cmd_iovec[total_cnt];
+		struct msm_nand_sps_cmd cmd[total_cnt];
+		uint32_t flash_status;
+	} *dma_buffer;
+
+	if (mtd->writesize == PAGE_SIZE_2K)
+		page = ofs >> 11;
+
+	if (mtd->writesize == PAGE_SIZE_4K)
+		page = ofs >> 12;
+
+	cwperpage = (mtd->writesize >> 9);
+
+	if (ofs > mtd->size) {
+		pr_err("Invalid offset 0x%llx\n", ofs);
+		bad_block = -EINVAL;
+		goto out;
+	}
+	if (ofs & (mtd->erasesize - 1)) {
+		pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
+		bad_block = -EINVAL;
+		goto out;
+	}
+
+	wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+				chip, sizeof(*dma_buffer) + 4)));
+	buf = (uint8_t *)dma_buffer + sizeof(*dma_buffer);
+
+	cmd = dma_buffer->cmd;
+	memset(&data, 0, sizeof(struct msm_nand_blk_isbad_data));
+	data.cfg.cmd = MSM_NAND_CMD_PAGE_READ_ALL;
+	data.cfg.cfg0 = chip->cfg0_raw & ~(7U << CW_PER_PAGE);
+	data.cfg.cfg1 = chip->cfg1_raw;
+
+	if (chip->cfg1 & (1 << WIDE_FLASH))
+		data.cfg.addr0 = (page << 16) |
+			((chip->cw_size * (cwperpage-1)) >> 1);
+	else
+		data.cfg.addr0 = (page << 16) |
+			(chip->cw_size * (cwperpage-1));
+
+	data.cfg.addr1 = (page >> 16) & 0xff;
+	data.ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+	data.exec = 1;
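+	/*
+	 * Offset of the bad block marker within the last codeword when the
+	 * page is read raw; only 4 bytes are read from this location.
+	 */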
+	data.read_offset = (mtd->writesize - (chip->cw_size * (cwperpage-1)));
+	dma_buffer->flash_status = 0xeeeeeeee;
+
+	curr_cmd = cmd;
+	msm_nand_prep_cfg_cmd_desc(info, data.cfg, &curr_cmd);
+
+	cmd = curr_cmd;
+	msm_nand_prep_ce(cmd, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+			data.ecc_bch_cfg, 0);
+	cmd++;
+
+	rdata = (data.read_offset << 0) | (4 << 16) | (1 << 31);
+	msm_nand_prep_ce(cmd, MSM_NAND_READ_LOCATION_0(info), WRITE, rdata, 0);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_EXEC_CMD(info), WRITE,
+			data.exec, SPS_IOVEC_FLAG_NWD);
+	cmd++;
+
+	msm_nand_prep_ce(cmd, MSM_NAND_FLASH_STATUS(info), READ,
+		msm_virt_to_dma(chip, &dma_buffer->flash_status),
+		SPS_IOVEC_FLAG_INT | SPS_IOVEC_FLAG_UNLOCK);
+	cmd++;
+
+	BUG_ON(cmd - dma_buffer->cmd > ARRAY_SIZE(dma_buffer->cmd));
+	dma_buffer->xfer.iovec_count = (cmd - dma_buffer->cmd);
+	dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+	dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+					&dma_buffer->cmd_iovec);
+	iovec = dma_buffer->xfer.iovec;
+
+	for (i = 0; i < dma_buffer->xfer.iovec_count; i++) {
+		iovec->addr =  msm_virt_to_dma(chip, &dma_buffer->cmd[i].ce);
+		iovec->size = sizeof(struct sps_command_element);
+		iovec->flags = dma_buffer->cmd[i].flags;
+		iovec++;
+	}
+	mutex_lock(&info->bam_lock);
+	/* Submit data descriptor */
+	ret = sps_transfer_one(info->sps.data_prod.handle,
+			msm_virt_to_dma(chip, buf),
+			4, NULL, SPS_IOVEC_FLAG_INT);
+
+	if (ret) {
+		pr_err("Failed to submit data desc %d\n", ret);
+		mutex_unlock(&info->bam_lock);
+		goto free_dma;
+	}
+	/* Submit command descriptor */
+	ret =  sps_transfer(info->sps.cmd_pipe.handle, &dma_buffer->xfer);
+	if (ret) {
+		pr_err("Failed to submit commands %d\n", ret);
+		mutex_unlock(&info->bam_lock);
+		goto free_dma;
+	}
+	wait_for_completion_io(&info->sps.cmd_pipe.completion);
+	wait_for_completion_io(&info->sps.data_prod.completion);
+	mutex_unlock(&info->bam_lock);
+
+	/* Check for flash status errors */
+	if (dma_buffer->flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+		pr_err("MPU/OP err set: %x\n", dma_buffer->flash_status);
+		bad_block = -EIO;
+		goto free_dma;
+	}
+
+	/* Check for bad block marker byte */
+	if (chip->cfg1 & (1 << WIDE_FLASH)) {
+		if (buf[0] != 0xFF || buf[1] != 0xFF)
+			bad_block = 1;
+	} else {
+		if (buf[0] != 0xFF)
+			bad_block = 1;
+	}
+free_dma:
+	msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer) + 4);
+out:
+	return ret ? ret : bad_block;
+}
+
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to mark a
+ * block as bad. This is done by writing 0s to the first page within the
+ * block, which also sets the bad block byte location to 0.
+ */
+static int msm_nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct mtd_oob_ops ops;
+	int ret;
+	uint8_t *buf;
+	size_t len;
+
+	if (ofs > mtd->size) {
+		pr_err("Invalid offset 0x%llx\n", ofs);
+		ret = -EINVAL;
+		goto out;
+	}
+	if (ofs & (mtd->erasesize - 1)) {
+		pr_err("unsupported block address, 0x%x\n", (uint32_t)ofs);
+		ret = -EINVAL;
+		goto out;
+	}
+	len = mtd->writesize + mtd->oobsize;
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf) {
+		pr_err("unable to allocate memory for 0x%x size\n", len);
+		ret = -ENOMEM;
+		goto out;
+	}
+	ops.mode = MTD_OPS_RAW;
+	ops.len = len;
+	ops.retlen = 0;
+	ops.ooblen = 0;
+	ops.datbuf = buf;
+	ops.oobbuf = NULL;
+	ret =  msm_nand_write_oob(mtd, ofs, &ops);
+	kfree(buf);
+out:
+	return ret;
+}
+
+/*
+ * Function that scans for the attached NAND device. This fills out all
+ * the uninitialized function pointers with the defaults. The flash ID is
+ * read and the mtd/chip structures are filled with the appropriate values.
+ */
+int msm_nand_scan(struct mtd_info *mtd)
+{
+	struct msm_nand_info *info = mtd->priv;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	struct flash_identification *supported_flash = &info->flash_dev;
+	int flash_id = 0, err = 0;
+	uint32_t i, mtd_writesize;
+	uint8_t dev_found = 0, wide_bus;
+	uint32_t manid, devid, devcfg;
+	uint32_t bad_block_byte;
+	struct nand_flash_dev *flashdev = NULL;
+	struct nand_manufacturers  *flashman = NULL;
+
+	/* Probe the Flash device for ONFI compliance */
+	if (!msm_nand_flash_onfi_probe(info)) {
+		dev_found = 1;
+	} else {
+		err = msm_nand_flash_read_id(info, 0, &flash_id);
+		if (err < 0) {
+			pr_err("Failed to read Flash ID\n");
+			err = -EINVAL;
+			goto out;
+		}
+		manid = flash_id & 0xFF;
+		devid = (flash_id >> 8) & 0xFF;
+		devcfg = (flash_id >> 24) & 0xFF;
+
+		for (i = 0; !flashman && nand_manuf_ids[i].id; ++i)
+			if (nand_manuf_ids[i].id == manid)
+				flashman = &nand_manuf_ids[i];
+		for (i = 0; !flashdev && nand_flash_ids[i].id; ++i)
+			if (nand_flash_ids[i].id == devid)
+				flashdev = &nand_flash_ids[i];
+		if (!flashdev || !flashman) {
+			pr_err("unknown nand flashid=%x manuf=%x devid=%x\n",
+				flash_id, manid, devid);
+			err = -ENOENT;
+			goto out;
+		}
+		dev_found = 1;
+		if (!flashdev->pagesize) {
+			supported_flash->widebus = devcfg & (1 << 6) ? 1 : 0;
+			supported_flash->pagesize = 1024 << (devcfg & 0x3);
+			supported_flash->blksize = (64 * 1024) <<
+							((devcfg >> 4) & 0x3);
+			supported_flash->oobsize = (8 << ((devcfg >> 2) & 1)) *
+				(supported_flash->pagesize >> 9);
+		} else {
+			supported_flash->widebus = flashdev->options &
+				       NAND_BUSWIDTH_16 ? 1 : 0;
+			supported_flash->pagesize = flashdev->pagesize;
+			supported_flash->blksize = flashdev->erasesize;
+			supported_flash->oobsize = flashdev->pagesize >> 5;
+		}
+		supported_flash->flash_id = flash_id;
+		supported_flash->density = flashdev->chipsize << 20;
+	}
+
+	if (dev_found) {
+		wide_bus       = supported_flash->widebus;
+		mtd->size      = supported_flash->density;
+		mtd->writesize = supported_flash->pagesize;
+		mtd->oobsize   = supported_flash->oobsize;
+		mtd->erasesize = supported_flash->blksize;
+		mtd_writesize = mtd->writesize;
+
+		/* Check whether the NAND device supports 8-bit ECC */
+		if (supported_flash->ecc_correctability >= 8)
+			chip->bch_caps = MSM_NAND_CAP_8_BIT_BCH;
+		else
+			chip->bch_caps = MSM_NAND_CAP_4_BIT_BCH;
+
+		pr_info("NAND Id: 0x%x Buswidth: %dBits Density: %lld MByte\n",
+			supported_flash->flash_id, (wide_bus) ? 16 : 8,
+			(mtd->size >> 20));
+		pr_info("pagesize: %d Erasesize: %d oobsize: %d (in Bytes)\n",
+			mtd->writesize, mtd->erasesize, mtd->oobsize);
+		pr_info("BCH ECC: %d Bit\n",
+			(chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH ? 8 : 4));
+	}
+
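+	/*
+	 * Raw codeword size on flash, including ECC parity and spare bytes:
+	 * 532 bytes with 8-bit BCH, 528 bytes with 4-bit BCH.
+	 */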
+	chip->cw_size = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ? 532 : 528;
+	chip->cfg0 = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
+		|  (516 <<  UD_SIZE_BYTES)
+		|  (0 << DISABLE_STATUS_AFTER_WRITE)
+		|  (5 << NUM_ADDR_CYCLES);
+
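+	/*
+	 * The bad block marker byte sits in the spare area right after the
+	 * user data of the last codeword; the +1 is because the
+	 * BAD_BLOCK_BYTE_NUM field appears to be 1-based.
+	 */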
+	bad_block_byte = (mtd_writesize - (chip->cw_size * (
+					(mtd_writesize >> 9) - 1)) + 1);
+	chip->cfg1 = (7 <<  NAND_RECOVERY_CYCLES)
+		|    (0 <<  CS_ACTIVE_BSY)
+		|    (bad_block_byte <<  BAD_BLOCK_BYTE_NUM)
+		|    (0 << BAD_BLOCK_IN_SPARE_AREA)
+		|    (2 << WR_RD_BSY_GAP)
+		| ((wide_bus ? 1 : 0) << WIDE_FLASH)
+		| (1 << ENABLE_BCH_ECC);
+
+	chip->cfg0_raw = (((mtd_writesize >> 9) - 1) << CW_PER_PAGE)
+		|	(5 << NUM_ADDR_CYCLES)
+		|	(0 << SPARE_SIZE_BYTES)
+		|	(chip->cw_size << UD_SIZE_BYTES);
+
+	chip->cfg1_raw = (7 <<  NAND_RECOVERY_CYCLES)
+		|    (0 <<  CS_ACTIVE_BSY)
+		|    (17 <<  BAD_BLOCK_BYTE_NUM)
+		|    (1 << BAD_BLOCK_IN_SPARE_AREA)
+		|    (2 << WR_RD_BSY_GAP)
+		| ((wide_bus ? 1 : 0) << WIDE_FLASH)
+		| (1 << DEV0_CFG1_ECC_DISABLE);
+
+	chip->ecc_bch_cfg = (0 << ECC_CFG_ECC_DISABLE)
+			|   (0 << ECC_SW_RESET)
+			|   (516 << ECC_NUM_DATA_BYTES)
+			|   (1 << ECC_FORCE_CLK_OPEN);
+
+	if (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) {
+		chip->cfg0 |= (wide_bus ? 0 << SPARE_SIZE_BYTES :
+				2 << SPARE_SIZE_BYTES);
+		chip->ecc_bch_cfg |= (1 << ECC_MODE)
+			|   ((wide_bus) ? (14 << ECC_PARITY_SIZE_BYTES) :
+					(13 << ECC_PARITY_SIZE_BYTES));
+	} else {
+		chip->cfg0 |= (wide_bus ? 2 << SPARE_SIZE_BYTES :
+				4 << SPARE_SIZE_BYTES);
+		chip->ecc_bch_cfg |= (0 << ECC_MODE)
+			|   ((wide_bus) ? (8 << ECC_PARITY_SIZE_BYTES) :
+					(7 << ECC_PARITY_SIZE_BYTES));
+	}
+
+	/*
+	 * For 4bit BCH ECC (default ECC), parity bytes = 7(x8) or 8(x16 I/O)
+	 * For 8bit BCH ECC, parity bytes = 13 (x8) or 14 (x16 I/O).
+	 */
+	chip->ecc_parity_bytes = (chip->bch_caps & MSM_NAND_CAP_8_BIT_BCH) ?
+				(wide_bus ? 14 : 13) : (wide_bus ? 8 : 7);
+	chip->ecc_buf_cfg = 0x203; /* No of bytes covered by ECC - 516 bytes */
+
+	pr_info("CFG0: 0x%08x,      CFG1: 0x%08x\n"
+		"            RAWCFG0: 0x%08x,   RAWCFG1: 0x%08x\n"
+		"          ECCBUFCFG: 0x%08x, ECCBCHCFG: 0x%08x\n"
+		"     BAD BLOCK BYTE: 0x%08x\n", chip->cfg0, chip->cfg1,
+		chip->cfg0_raw, chip->cfg1_raw, chip->ecc_buf_cfg,
+		chip->ecc_bch_cfg, bad_block_byte);
+
+	if (mtd->oobsize == 64) {
+		mtd->oobavail = 16;
+	} else if ((mtd->oobsize == 128) || (mtd->oobsize == 224)) {
+		mtd->oobavail = 32;
+	} else {
+		pr_err("Unsupported NAND oobsize: 0x%x\n", mtd->oobsize);
+		err = -ENODEV;
+		goto out;
+	}
+
+	/* Fill in remaining MTD driver data */
+	mtd->type = MTD_NANDFLASH;
+	mtd->flags = MTD_CAP_NANDFLASH;
+	mtd->_erase = msm_nand_erase;
+	mtd->_block_isbad = msm_nand_block_isbad;
+	mtd->_block_markbad = msm_nand_block_markbad;
+	mtd->_read = msm_nand_read;
+	mtd->_write = msm_nand_write;
+	mtd->_read_oob  = msm_nand_read_oob;
+	mtd->_write_oob = msm_nand_write_oob;
+	mtd->owner = THIS_MODULE;
+out:
+	return err;
+}
+
+#define BAM_APPS_PIPE_LOCK_GRP 0
+/*
+ * This function allocates, configures, and connects an end point, registers
+ * event notification for it, and allocates DMA memory for the pipe's
+ * descriptor FIFO.
+ */
+static int msm_nand_init_endpoint(struct msm_nand_info *info,
+				struct msm_nand_sps_endpt *end_point,
+				uint32_t pipe_index)
+{
+	int rc = 0;
+	struct sps_pipe *pipe_handle;
+	struct sps_connect *sps_config = &end_point->config;
+	struct sps_register_event *sps_event = &end_point->event;
+
+	pipe_handle = sps_alloc_endpoint();
+	if (!pipe_handle) {
+		pr_err("sps_alloc_endpoint() failed\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rc = sps_get_config(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_get_config() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	if (pipe_index == SPS_DATA_PROD_PIPE_INDEX) {
+		/* READ CASE: source - BAM; destination - system memory */
+		sps_config->source = info->sps.bam_handle;
+		sps_config->destination = SPS_DEV_HANDLE_MEM;
+		sps_config->mode = SPS_MODE_SRC;
+		sps_config->src_pipe_index = pipe_index;
+	} else if (pipe_index == SPS_DATA_CONS_PIPE_INDEX ||
+			pipe_index == SPS_CMD_CONS_PIPE_INDEX) {
+		/* WRITE CASE: source - system memory; destination - BAM */
+		sps_config->source = SPS_DEV_HANDLE_MEM;
+		sps_config->destination = info->sps.bam_handle;
+		sps_config->mode = SPS_MODE_DEST;
+		sps_config->dest_pipe_index = pipe_index;
+	}
+
+	sps_config->options = SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
+	sps_config->lock_group = BAM_APPS_PIPE_LOCK_GRP;
+	/*
+	 * Descriptor FIFO is a cyclic FIFO. If SPS_MAX_DESC_NUM descriptors
+	 * are allowed to be submitted before we get any ack for any of them,
+	 * the descriptor FIFO size should be: (SPS_MAX_DESC_NUM + 1) *
+	 * sizeof(struct sps_iovec).
+	 */
+	sps_config->desc.size = (SPS_MAX_DESC_NUM + 1) *
+					sizeof(struct sps_iovec);
+	sps_config->desc.base = dmam_alloc_coherent(info->nand_chip.dev,
+					sps_config->desc.size,
+					&sps_config->desc.phys_base,
+					GFP_KERNEL);
+	if (!sps_config->desc.base) {
+		pr_err("dmam_alloc_coherent() failed for size %x\n",
+				sps_config->desc.size);
+		rc = -ENOMEM;
+		goto free_endpoint;
+	}
+	memset(sps_config->desc.base, 0x00, sps_config->desc.size);
+
+	rc = sps_connect(pipe_handle, sps_config);
+	if (rc) {
+		pr_err("sps_connect() failed %d\n", rc);
+		goto free_endpoint;
+	}
+
+	init_completion(&end_point->completion);
+	sps_event->mode = SPS_TRIGGER_WAIT;
+	sps_event->options = SPS_O_DESC_DONE;
+	sps_event->xfer_done = &end_point->completion;
+	sps_event->user = (void *)info;
+
+	rc = sps_register_event(pipe_handle, sps_event);
+	if (rc) {
+		pr_err("sps_register_event() failed %d\n", rc);
+		goto sps_disconnect;
+	}
+	end_point->handle = pipe_handle;
+	pr_debug("pipe handle 0x%x for pipe %d\n", (uint32_t)pipe_handle,
+			pipe_index);
+	goto out;
+sps_disconnect:
+	sps_disconnect(pipe_handle);
+free_endpoint:
+	sps_free_endpoint(pipe_handle);
+out:
+	return rc;
+}
+
+/* This function disconnects and frees an end point */
+static void msm_nand_deinit_endpoint(struct msm_nand_info *info,
+				struct msm_nand_sps_endpt *end_point)
+{
+	sps_disconnect(end_point->handle);
+	sps_free_endpoint(end_point->handle);
+}
+
+/*
+ * This function registers BAM device and initializes its end points for
+ * the following pipes -
+ * system consumer pipe for data (pipe#0),
+ * system producer pipe for data (pipe#1),
+ * system consumer pipe for commands (pipe#2).
+ */
+static int msm_nand_bam_init(struct msm_nand_info *nand_info)
+{
+	struct sps_bam_props bam = {0};
+	int rc = 0;
+
+	bam.phys_addr = nand_info->bam_phys;
+	bam.virt_addr = nand_info->bam_base;
+	bam.irq = nand_info->bam_irq;
+	/*
+	 * The NAND device is accessible from both the Apps and Modem
+	 * processors, so the NANDc and BAM are shared between them. The BAM,
+	 * however, must be enabled and instantiated only once during boot-up,
+	 * by TrustZone, before the Modem/Apps are brought out of reset.
+	 *
+	 * This is indicated to the SPS driver on Apps by setting the
+	 * SPS_BAM_MGR_DEVICE_REMOTE flag. TrustZone performs the following
+	 * global initializations: execution environment, pipe assignment to
+	 * Apps/Modem, pipe super groups, and descriptor summing threshold.
+	 *
+	 * The NANDc BAM device supports two execution environments - Modem
+	 * and Apps - and thus the SPS_BAM_MGR_MULTI_EE flag is also set.
+	 */
+	bam.manage = SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE;
+
+	rc = sps_register_bam_device(&bam, &nand_info->sps.bam_handle);
+	if (rc) {
+		pr_err("sps_register_bam_device() failed with %d\n", rc);
+		goto out;
+	}
+	pr_info("BAM device registered: bam_handle 0x%x\n",
+			nand_info->sps.bam_handle);
+
+	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_prod,
+					SPS_DATA_PROD_PIPE_INDEX);
+	if (rc)
+		goto unregister_bam;
+
+	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.data_cons,
+					SPS_DATA_CONS_PIPE_INDEX);
+	if (rc)
+		goto deinit_data_prod;
+
+	rc = msm_nand_init_endpoint(nand_info, &nand_info->sps.cmd_pipe,
+					SPS_CMD_CONS_PIPE_INDEX);
+	if (rc)
+		goto deinit_data_cons;
+	goto out;
+deinit_data_cons:
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
+deinit_data_prod:
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
+unregister_bam:
+	sps_deregister_bam_device(nand_info->sps.bam_handle);
+out:
+	return rc;
+}
+
+/*
+ * This function de-registers BAM device, disconnects and frees its end points
+ * for all the pipes.
+ */
+static void msm_nand_bam_free(struct msm_nand_info *nand_info)
+{
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_prod);
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.data_cons);
+	msm_nand_deinit_endpoint(nand_info, &nand_info->sps.cmd_pipe);
+	sps_deregister_bam_device(nand_info->sps.bam_handle);
+}
+
+/* This function enables DMA support for the NANDc in BAM mode. */
+static int msm_nand_enable_dma(struct msm_nand_info *info)
+{
+	struct msm_nand_sps_cmd *sps_cmd;
+	struct msm_nand_chip *chip = &info->nand_chip;
+	int ret;
+
+	wait_event(chip->dma_wait_queue,
+		   (sps_cmd = msm_nand_get_dma_buffer(chip, sizeof(*sps_cmd))));
+
+	msm_nand_prep_ce(sps_cmd, MSM_NAND_CTRL(info), WRITE,
+			(1 << BAM_MODE_EN), SPS_IOVEC_FLAG_INT);
+
+	ret = sps_transfer_one(info->sps.cmd_pipe.handle,
+			msm_virt_to_dma(chip, &sps_cmd->ce),
+			sizeof(struct sps_command_element), NULL,
+			sps_cmd->flags);
+	if (ret) {
+		pr_err("Failed to submit command: %d\n", ret);
+		goto out;
+	}
+	wait_for_completion_io(&info->sps.cmd_pipe.completion);
+out:
+	msm_nand_release_dma_buffer(chip, sps_cmd, sizeof(*sps_cmd));
+	return ret;
+
+}
+
+/*
+ * This function gets called when a device named msm-nand is added in the
+ * device tree (.dts) file, along with its resources such as the physical
+ * addresses for the NANDc and BAM and the BAM IRQ.
+ *
+ * It also expects the NAND flash partition information to be passed in .dts
+ * file so that it can parse the partitions by calling MTD function
+ * mtd_device_parse_register().
+ *
+ */
+static int __devinit msm_nand_probe(struct platform_device *pdev)
+{
+	struct msm_nand_info *info;
+	struct resource *res;
+	int err, n_parts;
+	struct device_node *pnode;
+	struct mtd_part_parser_data parser_data;
+
+	if (!pdev->dev.of_node) {
+		pr_err("No valid device tree info for NANDc\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	/*
+	 * The partition information can also be passed from kernel command
+	 * line. Also, the MTD core layer supports adding the whole device as
+	 * one MTD device when no partition information is available at all.
+	 * Hence, do not bail out when partition information is not available
+	 * in device tree.
+	 */
+	pnode = of_find_node_by_path("/qcom,mtd-partitions");
+	if (!pnode)
+		pr_info("No partition info available in device tree\n");
+	info = devm_kzalloc(&pdev->dev, sizeof(struct msm_nand_info),
+				GFP_KERNEL);
+	if (!info) {
+		pr_err("Unable to allocate memory for msm_nand_info\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"nand_phys");
+	if (!res || !res->start) {
+		pr_err("NAND phys address range is not provided\n");
+		err = -ENODEV;
+		goto out;
+	}
+	info->nand_phys = res->start;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"bam_phys");
+	if (!res || !res->start) {
+		pr_err("BAM phys address range is not provided\n");
+		err = -ENODEV;
+		goto out;
+	}
+	info->bam_phys = res->start;
+	info->bam_base = devm_ioremap(&pdev->dev, res->start,
+					resource_size(res));
+	if (!info->bam_base) {
+		pr_err("BAM ioremap() failed for addr 0x%x size 0x%x\n",
+			res->start, resource_size(res));
+		err = -ENOMEM;
+		goto out;
+	}
+
+	info->bam_irq = platform_get_irq_byname(pdev, "bam_irq");
+	if (info->bam_irq < 0) {
+		pr_err("BAM IRQ is not provided\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	info->mtd.name = dev_name(&pdev->dev);
+	info->mtd.priv = info;
+	info->mtd.owner = THIS_MODULE;
+	info->nand_chip.dev = &pdev->dev;
+	init_waitqueue_head(&info->nand_chip.dma_wait_queue);
+	mutex_init(&info->bam_lock);
+
+	info->nand_chip.dma_virt_addr =
+		dmam_alloc_coherent(&pdev->dev, MSM_NAND_DMA_BUFFER_SIZE,
+			&info->nand_chip.dma_phys_addr, GFP_KERNEL);
+	if (!info->nand_chip.dma_virt_addr) {
+		pr_err("No memory for DMA buffer size %x\n",
+				MSM_NAND_DMA_BUFFER_SIZE);
+		err = -ENOMEM;
+		goto out;
+	}
+	err = msm_nand_bam_init(info);
+	if (err) {
+		pr_err("msm_nand_bam_init() failed %d\n", err);
+		goto out;
+	}
+	err = msm_nand_enable_dma(info);
+	if (err) {
+		pr_err("Failed to enable DMA in NANDc\n");
+		goto free_bam;
+	}
+	if (msm_nand_scan(&info->mtd)) {
+		pr_err("No nand device found\n");
+		err = -ENXIO;
+		goto free_bam;
+	}
+	parser_data.of_node = pnode;
+	n_parts = mtd_device_parse_register(&info->mtd, NULL, &parser_data,
+					NULL, 0);
+	if (n_parts < 0) {
+		pr_err("Unable to register MTD partitions %d\n", n_parts);
+		goto free_bam;
+	}
+	dev_set_drvdata(&pdev->dev, info);
+
+	pr_info("NANDc phys addr 0x%lx, BAM phys addr 0x%lx, BAM IRQ %d\n",
+			info->nand_phys, info->bam_phys, info->bam_irq);
+	pr_info("Allocated DMA buffer at virt_addr 0x%p, phys_addr 0x%x\n",
+		info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
+	pr_info("Found %d MTD partitions\n", n_parts);
+	goto out;
+free_bam:
+	msm_nand_bam_free(info);
+out:
+	return err;
+}
+
+/*
+ * Remove handler that gets called when the msm-nand driver/device
+ * is removed.
+ */
+static int __devexit msm_nand_remove(struct platform_device *pdev)
+{
+	struct msm_nand_info *info = dev_get_drvdata(&pdev->dev);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+	if (info) {
+		mtd_device_unregister(&info->mtd);
+		msm_nand_bam_free(info);
+	}
+	return 0;
+}
+
+#define DRIVER_NAME "msm_qpic_nand"
+static const struct of_device_id msm_nand_match_table[] = {
+	{ .compatible = "qcom,msm-nand", },
+	{},
+};
+static struct platform_driver msm_nand_driver = {
+	.probe		= msm_nand_probe,
+	.remove		= __devexit_p(msm_nand_remove),
+	.driver = {
+		.name		= DRIVER_NAME,
+		.of_match_table = msm_nand_match_table,
+	},
+};
+
+module_platform_driver(msm_nand_driver);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM QPIC NAND flash driver");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2b73d99..d26c845 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -87,6 +87,8 @@
 
 static const char driver_name [] = "usbnet";
 
+static struct workqueue_struct	*usbnet_wq;
+
 /* use ethtool to change the level for any given device */
 static int msg_level = -1;
 module_param (msg_level, int, 0);
@@ -246,7 +248,7 @@
 	if (skb_defer_rx_timestamp(skb))
 		return;
 
-	status = netif_rx (skb);
+	status = netif_rx_ni(skb);
 	if (status != NET_RX_SUCCESS)
 		netif_dbg(dev, rx_err, dev->net,
 			  "netif_rx status %d\n", status);
@@ -316,7 +318,7 @@
 	spin_lock(&dev->done.lock);
 	__skb_queue_tail(&dev->done, skb);
 	if (dev->done.qlen == 1)
-		tasklet_schedule(&dev->bh);
+		queue_work(usbnet_wq, &dev->bh_w);
 	spin_unlock_irqrestore(&dev->done.lock, flags);
 	return old_state;
 }
@@ -390,7 +392,7 @@
 		default:
 			netif_dbg(dev, rx_err, dev->net,
 				  "rx submit, %d\n", retval);
-			tasklet_schedule (&dev->bh);
+			queue_work(usbnet_wq, &dev->bh_w);
 			break;
 		case 0:
 			usb_mark_last_busy(dev->udev);
@@ -583,7 +585,7 @@
 		num++;
 	}
 
-	tasklet_schedule(&dev->bh);
+	queue_work(usbnet_wq, &dev->bh_w);
 
 	netif_dbg(dev, rx_status, dev->net,
 		  "paused rx queue disabled, %d skbs requeued\n", num);
@@ -652,7 +654,7 @@
 {
 	if (netif_running(dev->net)) {
 		(void) unlink_urbs (dev, &dev->rxq);
-		tasklet_schedule(&dev->bh);
+		queue_work(usbnet_wq, &dev->bh_w);
 	}
 }
 EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
@@ -726,7 +728,7 @@
 	 */
 	dev->flags = 0;
 	del_timer_sync (&dev->delay);
-	tasklet_kill (&dev->bh);
+	cancel_work_sync(&dev->bh_w);
 	if (info->manage_power)
 		info->manage_power(dev, 0);
 	else
@@ -799,7 +801,7 @@
 		   "simple");
 
 	// delay posting reads until we're fully open
-	tasklet_schedule (&dev->bh);
+	queue_work(usbnet_wq, &dev->bh_w);
 	if (info->manage_power) {
 		retval = info->manage_power(dev, 1);
 		if (retval < 0)
@@ -969,7 +971,7 @@
 					   status);
 		} else {
 			clear_bit (EVENT_RX_HALT, &dev->flags);
-			tasklet_schedule (&dev->bh);
+			queue_work(usbnet_wq, &dev->bh_w);
 		}
 	}
 
@@ -994,7 +996,7 @@
 			usb_autopm_put_interface(dev->intf);
 fail_lowmem:
 			if (resched)
-				tasklet_schedule (&dev->bh);
+				queue_work(usbnet_wq, &dev->bh_w);
 		}
 	}
 
@@ -1080,7 +1082,7 @@
 	struct usbnet		*dev = netdev_priv(net);
 
 	unlink_urbs (dev, &dev->txq);
-	tasklet_schedule (&dev->bh);
+	queue_work(usbnet_wq, &dev->bh_w);
 
 	// FIXME: device recovery -- reset?
 }
@@ -1267,13 +1269,21 @@
 					  "rxqlen %d --> %d\n",
 					  temp, dev->rxq.qlen);
 			if (dev->rxq.qlen < qlen)
-				tasklet_schedule (&dev->bh);
+				queue_work(usbnet_wq, &dev->bh_w);
 		}
 		if (dev->txq.qlen < TX_QLEN (dev))
 			netif_wake_queue (dev->net);
 	}
 }
 
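+/*
+ * Work handler that wraps the old tasklet handler; usbnet_bh() still takes
+ * its device argument as an unsigned long.
+ */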
+static void usbnet_bh_w(struct work_struct *work)
+{
+	struct usbnet		*dev =
+		container_of(work, struct usbnet, bh_w);
+	unsigned long param = (unsigned long)dev;
+
+	usbnet_bh(param);
+}
 
 /*-------------------------------------------------------------------------
  *
@@ -1392,8 +1402,7 @@
 	skb_queue_head_init (&dev->txq);
 	skb_queue_head_init (&dev->done);
 	skb_queue_head_init(&dev->rxq_pause);
-	dev->bh.func = usbnet_bh;
-	dev->bh.data = (unsigned long) dev;
+	INIT_WORK(&dev->bh_w, usbnet_bh_w);
 	INIT_WORK (&dev->kevent, kevent);
 	init_usb_anchor(&dev->deferred);
 	dev->delay.function = usbnet_bh;
@@ -1577,7 +1586,7 @@
 		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
 			if (!(dev->txq.qlen >= TX_QLEN(dev)))
 				netif_tx_wake_all_queues(dev->net);
-			tasklet_schedule (&dev->bh);
+			queue_work(usbnet_wq, &dev->bh_w);
 		}
 	}
 	return 0;
@@ -1594,12 +1603,20 @@
 		FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
 
 	random_ether_addr(node_id);
+
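+	/* usbnet bottom halves now run from this workqueue instead of a tasklet */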
+	usbnet_wq = create_singlethread_workqueue("usbnet");
+	if (!usbnet_wq) {
+		pr_err("%s: Unable to create workqueue: usbnet\n", __func__);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 module_init(usbnet_init);
 
 static void __exit usbnet_exit(void)
 {
+	destroy_workqueue(usbnet_wq);
 }
 module_exit(usbnet_exit);
 
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index ad9dc7d..7695778 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -184,6 +184,12 @@
 	return 0;
 }
 
+void wcnss_flush_delayed_boot_votes(void)
+{
+	flush_delayed_work_sync(&penv->wcnss_work);
+}
+EXPORT_SYMBOL(wcnss_flush_delayed_boot_votes);
+
 static int __devexit
 wcnss_wlan_ctrl_remove(struct platform_device *pdev)
 {
diff --git a/drivers/platform/msm/qpnp-pwm.c b/drivers/platform/msm/qpnp-pwm.c
index c9cd0e07..708d658 100644
--- a/drivers/platform/msm/qpnp-pwm.c
+++ b/drivers/platform/msm/qpnp-pwm.c
@@ -440,7 +440,7 @@
 	unsigned int		pwm_value, max_pwm_value;
 	struct qpnp_lpg_chip	*chip = pwm->chip;
 	struct qpnp_lut_config	*lut = &chip->lpg_config.lut_config;
-	int			i, pwm_size, rc;
+	int			i, pwm_size, rc = 0;
 	int			burst_size = SPMI_MAX_BUF_LEN;
 	int			list_len = lut->list_size << 1;
 	int			offset = lut->lo_index << 2;
diff --git a/drivers/power/smb349.c b/drivers/power/smb349.c
index ffc92d5..f9ca81c 100644
--- a/drivers/power/smb349.c
+++ b/drivers/power/smb349.c
@@ -617,6 +617,8 @@
 
 	the_smb349_chg = smb349_chg;
 
+	spin_lock_init(&smb349_chg->lock);
+
 	create_debugfs_entries(smb349_chg);
 	INIT_WORK(&smb349_chg->chg_work, chg_worker);
 
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index a8d3720..0575d80 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -28,6 +28,33 @@
 static uint32_t limited_max_freq = MSM_CPUFREQ_NO_LIMIT;
 static struct delayed_work check_temp_work;
 
+static int limit_idx;
+static int limit_idx_low;
+static int limit_idx_high;
+static struct cpufreq_frequency_table *table;
+
+static int msm_thermal_get_freq_table(void)
+{
+	int ret = 0;
+	int i = 0;
+
+	table = cpufreq_frequency_get_table(0);
+	if (table == NULL) {
+		pr_debug("%s: error reading cpufreq table\n", __func__);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	while (table[i].frequency != CPUFREQ_TABLE_END)
+		i++;
+
+	limit_idx_low = 0;
+	limit_idx_high = limit_idx = i - 1;
+	BUG_ON(limit_idx_high <= 0 || limit_idx_high <= limit_idx_low);
+fail:
+	return ret;
+}
+
 static int update_cpu_max_freq(int cpu, uint32_t max_freq)
 {
 	int ret = 0;
@@ -36,10 +63,6 @@
 	if (ret)
 		return ret;
 
-	ret = cpufreq_update_policy(cpu);
-	if (ret)
-		return ret;
-
 	limited_max_freq = max_freq;
 	if (max_freq != MSM_CPUFREQ_NO_LIMIT)
 		pr_info("msm_thermal: Limiting cpu%d max frequency to %d\n",
@@ -47,11 +70,14 @@
 	else
 		pr_info("msm_thermal: Max frequency reset for cpu%d\n", cpu);
 
+	ret = cpufreq_update_policy(cpu);
+
 	return ret;
 }
 
 static void check_temp(struct work_struct *work)
 {
+	static int limit_init;
 	struct tsens_device tsens_dev;
 	unsigned long temp = 0;
 	uint32_t max_freq = limited_max_freq;
@@ -66,12 +92,34 @@
 		goto reschedule;
 	}
 
-	if (temp >= msm_thermal_info.limit_temp)
-		max_freq = msm_thermal_info.limit_freq;
-	else if (temp <
-		msm_thermal_info.limit_temp - msm_thermal_info.temp_hysteresis)
-		max_freq = MSM_CPUFREQ_NO_LIMIT;
+	if (!limit_init) {
+		ret = msm_thermal_get_freq_table();
+		if (ret)
+			goto reschedule;
+		else
+			limit_init = 1;
+	}
 
+	if (temp >= msm_thermal_info.limit_temp_degC) {
+		if (limit_idx == limit_idx_low)
+			goto reschedule;
+
+		limit_idx -= msm_thermal_info.freq_step;
+		if (limit_idx < limit_idx_low)
+			limit_idx = limit_idx_low;
+		max_freq = table[limit_idx].frequency;
+	} else if (temp < msm_thermal_info.limit_temp_degC -
+		 msm_thermal_info.temp_hysteresis_degC) {
+		if (limit_idx == limit_idx_high)
+			goto reschedule;
+
+		limit_idx += msm_thermal_info.freq_step;
+		if (limit_idx >= limit_idx_high) {
+			limit_idx = limit_idx_high;
+			max_freq = MSM_CPUFREQ_NO_LIMIT;
+		} else
+			max_freq = table[limit_idx].frequency;
+	}
 	if (max_freq == limited_max_freq)
 		goto reschedule;
 
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index c483bb45..a5235ba 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -503,11 +503,10 @@
 
 	if (msm_port->uim) {
 		msm_write(port,
-			UART_SIM_CFG_UIM_TX_MODE |
-			UART_SIM_CFG_UIM_RX_MODE |
 			UART_SIM_CFG_STOP_BIT_LEN_N(1) |
 			UART_SIM_CFG_SIM_CLK_ON |
 			UART_SIM_CFG_SIM_CLK_STOP_HIGH |
+			UART_SIM_CFG_MASK_RX |
 			UART_SIM_CFG_SIM_SEL,
 			UART_SIM_CFG);
 
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index a769825..34228ec 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -108,6 +108,7 @@
 #define UART_SIM_CFG_SIM_CLK_ON		(1 << 7)
 #define UART_SIM_CFG_SIM_CLK_TD8_SEL	(1 << 6)
 #define UART_SIM_CFG_SIM_CLK_STOP_HIGH	(1 << 5)
+#define UART_SIM_CFG_MASK_RX		(1 << 3)
 #define UART_SIM_CFG_SIM_SEL		(1 << 0)
 
 #define UART_MISR_MODE		0x0040
diff --git a/drivers/usb/gadget/android.c b/drivers/usb/gadget/android.c
index 9339800..54c486e 100644
--- a/drivers/usb/gadget/android.c
+++ b/drivers/usb/gadget/android.c
@@ -71,6 +71,11 @@
 #include "u_ether.c"
 #include "u_bam_data.c"
 #include "f_mbim.c"
+#include "f_qc_ecm.c"
+#include "u_qc_ether.c"
+#ifdef CONFIG_TARGET_CORE
+#include "f_tcm.c"
+#endif
 
 MODULE_AUTHOR("Mike Lockwood");
 MODULE_DESCRIPTION("Android Composite USB Driver");
@@ -549,6 +554,95 @@
 	.attributes	= rmnet_function_attributes,
 };
 
+struct ecm_function_config {
+	u8      ethaddr[ETH_ALEN];
+};
+
+static int ecm_function_init(struct android_usb_function *f,
+				struct usb_composite_dev *cdev)
+{
+	f->config = kzalloc(sizeof(struct ecm_function_config), GFP_KERNEL);
+	if (!f->config)
+		return -ENOMEM;
+	return 0;
+}
+
+static void ecm_function_cleanup(struct android_usb_function *f)
+{
+	kfree(f->config);
+	f->config = NULL;
+}
+
+static int ecm_qc_function_bind_config(struct android_usb_function *f,
+					struct usb_configuration *c)
+{
+	int ret;
+	struct ecm_function_config *ecm = f->config;
+
+	if (!ecm) {
+		pr_err("%s: ecm config is null\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_info("%s MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", __func__,
+		ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2],
+		ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]);
+
+	ret = gether_qc_setup_name(c->cdev->gadget, ecm->ethaddr, "ecm");
+	if (ret) {
+		pr_err("%s: gether_qc_setup_name failed\n", __func__);
+		return ret;
+	}
+
+	return ecm_qc_bind_config(c, ecm->ethaddr);
+}
+
+static void ecm_qc_function_unbind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	gether_qc_cleanup();
+}
+
+static ssize_t ecm_ethaddr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct ecm_function_config *ecm = f->config;
+	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		ecm->ethaddr[0], ecm->ethaddr[1], ecm->ethaddr[2],
+		ecm->ethaddr[3], ecm->ethaddr[4], ecm->ethaddr[5]);
+}
+
+static ssize_t ecm_ethaddr_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct android_usb_function *f = dev_get_drvdata(dev);
+	struct ecm_function_config *ecm = f->config;
+	unsigned int addr[ETH_ALEN];
+	int i;
+
+	/*
+	 * Parse into ints first; passing u8 pointers cast to int * would
+	 * make sscanf() write past each byte of the address array.
+	 */
+	if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+		    &addr[0], &addr[1], &addr[2],
+		    &addr[3], &addr[4], &addr[5]) == 6) {
+		for (i = 0; i < ETH_ALEN; i++)
+			ecm->ethaddr[i] = (u8)addr[i];
+		return size;
+	}
+	return -EINVAL;
+}
+
+static DEVICE_ATTR(ecm_ethaddr, S_IRUGO | S_IWUSR, ecm_ethaddr_show,
+					       ecm_ethaddr_store);
+
+static struct device_attribute *ecm_function_attributes[] = {
+	&dev_attr_ecm_ethaddr,
+	NULL
+};
+
+static struct android_usb_function ecm_qc_function = {
+	.name		= "ecm_qc",
+	.init		= ecm_function_init,
+	.cleanup	= ecm_function_cleanup,
+	.bind_config	= ecm_qc_function_bind_config,
+	.unbind_config	= ecm_qc_function_unbind_config,
+	.attributes	= ecm_function_attributes,
+};
 
 /* MBIM - used with BAM */
 #define MAX_MBIM_INSTANCES 1
@@ -1201,9 +1295,55 @@
 	.ctrlrequest	= accessory_function_ctrlrequest,
 };
 
+static int android_uasp_connect_cb(bool connect)
+{
+	/*
+	 * TODO
+	 * We may have to keep the gadget disabled until the UASP configfs
+	 * nodes are configured, which includes mapping the LUN to its
+	 * backing file. That is a fundamental difference between
+	 * f_mass_storage and f_tcm, and it means UASP cannot be part of
+	 * the default composition.
+	 *
+	 * For now, assume that the UASP configfs nodes are configured
+	 * before the android gadget is enabled, or that the cable is
+	 * reconnected after mapping the LUN.
+	 *
+	 * Also consider making UASP respond to host requests when the
+	 * LUN is not mapped.
+	 */
+	pr_debug("UASP %s\n", connect ? "connect" : "disconnect");
+
+	return 0;
+}
+
+static int uasp_function_init(struct android_usb_function *f,
+					struct usb_composite_dev *cdev)
+{
+	return f_tcm_init(&android_uasp_connect_cb);
+}
+
+static void uasp_function_cleanup(struct android_usb_function *f)
+{
+	f_tcm_exit();
+}
+
+static int uasp_function_bind_config(struct android_usb_function *f,
+						struct usb_configuration *c)
+{
+	return tcm_bind_config(c);
+}
+
+static struct android_usb_function uasp_function = {
+	.name		= "uasp",
+	.init		= uasp_function_init,
+	.cleanup	= uasp_function_cleanup,
+	.bind_config	= uasp_function_bind_config,
+};
 
 static struct android_usb_function *supported_functions[] = {
 	&mbim_function,
+	&ecm_qc_function,
 	&rmnet_smd_function,
 	&rmnet_sdio_function,
 	&rmnet_smd_sdio_function,
@@ -1218,6 +1358,7 @@
 	&rndis_function,
 	&mass_storage_function,
 	&accessory_function,
+	&uasp_function,
 	NULL
 };
 
diff --git a/drivers/usb/gadget/f_diag.c b/drivers/usb/gadget/f_diag.c
index 72bff490..87597d5 100644
--- a/drivers/usb/gadget/f_diag.c
+++ b/drivers/usb/gadget/f_diag.c
@@ -20,7 +20,6 @@
 #include <linux/platform_device.h>
 
 #include <mach/usbdiag.h>
-#include <mach/rpc_hsusb.h>
 
 #include <linux/usb/composite.h>
 #include <linux/usb/gadget.h>
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
index 0394b0b..96790c5 100644
--- a/drivers/usb/gadget/f_mtp.c
+++ b/drivers/usb/gadget/f_mtp.c
@@ -788,7 +788,8 @@
 			/* wait for our last read to complete */
 			ret = wait_event_interruptible(dev->read_wq,
 				dev->rx_done || dev->state != STATE_BUSY);
-			if (dev->state == STATE_CANCELED) {
+			if (dev->state == STATE_CANCELED
+					|| dev->state == STATE_OFFLINE) {
 				r = -ECANCELED;
 				if (!dev->rx_done)
 					usb_ep_dequeue(dev->ep_out, read_req);
diff --git a/drivers/usb/gadget/f_qc_ecm.c b/drivers/usb/gadget/f_qc_ecm.c
new file mode 100644
index 0000000..98a29ae
--- /dev/null
+++ b/drivers/usb/gadget/f_qc_ecm.c
@@ -0,0 +1,869 @@
+/*
+ * f_qc_ecm.c -- USB CDC Ethernet (ECM) link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include "u_ether.h"
+#include "u_qc_ether.h"
+
+
+/*
+ * This function is a "CDC Ethernet Networking Control Model" (CDC ECM)
+ * Ethernet link.  The data transfer model is simple (packets sent and
+ * received over bulk endpoints using normal short packet termination),
+ * and the control model exposes various data and optional notifications.
+ *
+ * ECM is well standardized and (except for Microsoft) supported by most
+ * operating systems with USB host support.  It's the preferred interop
+ * solution for Ethernet over USB, at least for firmware based solutions.
+ * (Hardware solutions tend to be more minimalist.)  A newer and simpler
+ * "Ethernet Emulation Model" (CDC EEM) hasn't yet caught on.
+ *
+ * Note that ECM requires the use of "alternate settings" for its data
+ * interface.  This means that the set_alt() method has real work to do,
+ * and also means that a get_alt() method is required.
+ *
+ * This function is based on the USB CDC Ethernet link function driver and
+ * contains the MSM-specific implementation.
+ */
+
+
+enum ecm_qc_notify_state {
+	ECM_QC_NOTIFY_NONE,		/* don't notify */
+	ECM_QC_NOTIFY_CONNECT,		/* issue CONNECT next */
+	ECM_QC_NOTIFY_SPEED,		/* issue SPEED_CHANGE next */
+};
+
+struct f_ecm_qc {
+	struct qc_gether			port;
+	u8				ctrl_id, data_id;
+
+	char				ethaddr[14];
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+	u8				notify_state;
+	bool				is_open;
+};
+
+static inline struct f_ecm_qc *func_to_ecm_qc(struct usb_function *f)
+{
+	return container_of(f, struct f_ecm_qc, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static inline unsigned ecm_qc_bitrate(struct usb_gadget *g)
+{
+	if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+	else
+		return 19 *  64 * 1 * 1000 * 8;
+}
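As a quick check on the constants above, a stand-alone sketch (illustrative only) that reproduces the arithmetic: at high speed, 13 packets of 512 bytes per microframe and 8 microframes per millisecond; at full speed, 19 packets of 64 bytes per 1 ms frame.

/* Illustrative only: reproduces the ecm_qc_bitrate() arithmetic. */
#include <stdio.h>

int main(void)
{
	unsigned int hs = 13 * 512 * 8 * 1000 * 8;	/* 425,984,000 bps */
	unsigned int fs = 19 *  64 * 1 * 1000 * 8;	/*   9,728,000 bps */

	printf("high speed peak: %u bps\n", hs);
	printf("full speed peak: %u bps\n", fs);
	return 0;
}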
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Include the status endpoint if we can, even though it's optional.
+ *
+ * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one
+ * packet, to simplify cancellation; and a big transfer interval, to
+ * waste less bandwidth.
+ *
+ * Some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even
+ * if they ignore the connect/disconnect notifications that real aether
+ * can provide.  More advanced cdc configurations might want to support
+ * encapsulated commands (vendor-specific, using control-OUT).
+ */
+
+#define ECM_QC_LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
+#define ECM_QC_STATUS_BYTECOUNT		16	/* 8 byte header + data */
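To make the 16-byte figure concrete, a stand-alone sketch (illustrative only) of the SPEED_CHANGE buffer layout: an 8-byte CDC notification header, mirrored locally from struct usb_cdc_notification in <linux/usb/cdc.h>, followed by two 32-bit link speeds.

/* Illustrative only: the layout behind ECM_QC_STATUS_BYTECOUNT (16). */
#include <stdint.h>
#include <assert.h>

/* local mirror of the kernel's 8-byte struct usb_cdc_notification */
struct cdc_notification {
	uint8_t  bmRequestType;
	uint8_t  bNotificationType;
	uint16_t wValue;
	uint16_t wIndex;
	uint16_t wLength;
} __attribute__((packed));

int main(void)
{
	/* 8-byte header + downlink and uplink bit rates, 4 bytes each */
	assert(sizeof(struct cdc_notification) + 2 * sizeof(uint32_t) == 16);
	return 0;
}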
+
+/* currently only one std ecm instance is supported */
+#define ECM_QC_NO_PORTS						1
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor ecm_qc_control_intf = {
+	.bLength =		sizeof ecm_qc_control_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bInterfaceProtocol =	USB_CDC_PROTO_NONE,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc ecm_qc_header_desc = {
+	.bLength =		sizeof ecm_qc_header_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_union_desc ecm_qc_union_desc = {
+	.bLength =		sizeof(ecm_qc_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+static struct usb_cdc_ether_desc ecm_qc_desc = {
+	.bLength =		sizeof ecm_qc_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ETHERNET_TYPE,
+
+	/* this descriptor actually adds value, surprise! */
+	/* .iMACAddress = DYNAMIC */
+	.bmEthernetStatistics =	cpu_to_le32(0), /* no statistics */
+	.wMaxSegmentSize =	cpu_to_le16(ETH_FRAME_LEN),
+	.wNumberMCFilters =	cpu_to_le16(0),
+	.bNumberPowerFilters =	0,
+};
+
+/* the default data interface has no endpoints ... */
+
+static struct usb_interface_descriptor ecm_qc_data_nop_intf = {
+	.bLength =		sizeof ecm_qc_data_nop_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* ... but the "real" data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor ecm_qc_data_intf = {
+	.bLength =		sizeof ecm_qc_data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bInterfaceNumber =	1,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+	.bInterval =		1 << ECM_QC_LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor ecm_qc_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *ecm_qc_fs_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_qc_control_intf,
+	(struct usb_descriptor_header *) &ecm_qc_header_desc,
+	(struct usb_descriptor_header *) &ecm_qc_union_desc,
+	(struct usb_descriptor_header *) &ecm_qc_desc,
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &ecm_qc_fs_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_qc_data_intf,
+	(struct usb_descriptor_header *) &ecm_qc_fs_in_desc,
+	(struct usb_descriptor_header *) &ecm_qc_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor ecm_qc_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(ECM_QC_STATUS_BYTECOUNT),
+	.bInterval =		ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+static struct usb_endpoint_descriptor ecm_qc_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor ecm_qc_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ecm_qc_hs_function[] = {
+	/* CDC ECM control descriptors */
+	(struct usb_descriptor_header *) &ecm_qc_control_intf,
+	(struct usb_descriptor_header *) &ecm_qc_header_desc,
+	(struct usb_descriptor_header *) &ecm_qc_union_desc,
+	(struct usb_descriptor_header *) &ecm_qc_desc,
+	/* NOTE: status endpoint might need to be removed */
+	(struct usb_descriptor_header *) &ecm_qc_hs_notify_desc,
+	/* data interface, altsettings 0 and 1 */
+	(struct usb_descriptor_header *) &ecm_qc_data_nop_intf,
+	(struct usb_descriptor_header *) &ecm_qc_data_intf,
+	(struct usb_descriptor_header *) &ecm_qc_hs_in_desc,
+	(struct usb_descriptor_header *) &ecm_qc_hs_out_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string ecm_qc_string_defs[] = {
+	[0].s = "CDC Ethernet Control Model (ECM)",
+	[1].s = NULL /* DYNAMIC */,
+	[2].s = "CDC Ethernet Data",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings ecm_qc_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		ecm_qc_string_defs,
+};
+
+static struct usb_gadget_strings *ecm_qc_strings[] = {
+	&ecm_qc_string_table,
+	NULL,
+};
+
+static struct data_port ecm_qc_bam_port;
+
+static int ecm_qc_bam_setup(void)
+{
+	int ret;
+
+	ret = bam_data_setup(ECM_QC_NO_PORTS);
+	if (ret) {
+		pr_err("bam_data_setup failed err: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ecm_qc_bam_connect(struct f_ecm_qc *dev)
+{
+	int ret;
+
+	ecm_qc_bam_port.func = dev->port.func;
+	ecm_qc_bam_port.in = dev->port.in_ep;
+	ecm_qc_bam_port.out = dev->port.out_ep;
+
+	/* currently we use the first connection */
+	ret = bam_data_connect(&ecm_qc_bam_port, 0, 0);
+	if (ret) {
+		pr_err("bam_data_connect failed: err:%d\n",
+				ret);
+		return ret;
+	} else {
+		pr_info("ecm bam connected\n");
+	}
+
+	return 0;
+}
+
+static int ecm_qc_bam_disconnect(struct f_ecm_qc *dev)
+{
+	pr_debug("dev:%p. %s Do nothing.\n",
+			 dev, __func__);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void ecm_qc_do_notify(struct f_ecm_qc *ecm)
+{
+	struct usb_request		*req = ecm->notify_req;
+	struct usb_cdc_notification	*event;
+	struct usb_composite_dev	*cdev = ecm->port.func.config->cdev;
+	__le32				*data;
+	int				status;
+
+	/* notification already in flight? */
+	if (!req)
+		return;
+
+	event = req->buf;
+	switch (ecm->notify_state) {
+	case ECM_QC_NOTIFY_NONE:
+		return;
+
+	case ECM_QC_NOTIFY_CONNECT:
+		event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION;
+		if (ecm->is_open)
+			event->wValue = cpu_to_le16(1);
+		else
+			event->wValue = cpu_to_le16(0);
+		event->wLength = 0;
+		req->length = sizeof *event;
+
+		DBG(cdev, "notify connect %s\n",
+				ecm->is_open ? "true" : "false");
+		ecm->notify_state = ECM_QC_NOTIFY_SPEED;
+		break;
+
+	case ECM_QC_NOTIFY_SPEED:
+		event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE;
+		event->wValue = cpu_to_le16(0);
+		event->wLength = cpu_to_le16(8);
+		req->length = ECM_QC_STATUS_BYTECOUNT;
+
+		/* SPEED_CHANGE data is up/down speeds in bits/sec */
+		data = req->buf + sizeof *event;
+		data[0] = cpu_to_le32(ecm_qc_bitrate(cdev->gadget));
+		data[1] = data[0];
+
+		DBG(cdev, "notify speed %d\n", ecm_qc_bitrate(cdev->gadget));
+		ecm->notify_state = ECM_QC_NOTIFY_NONE;
+		break;
+	}
+	event->bmRequestType = 0xA1;
+	event->wIndex = cpu_to_le16(ecm->ctrl_id);
+
+	ecm->notify_req = NULL;
+	status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC);
+	if (status < 0) {
+		ecm->notify_req = req;
+		DBG(cdev, "notify --> %d\n", status);
+	}
+}
+
+static void ecm_qc_notify(struct f_ecm_qc *ecm)
+{
+	/* NOTE on most versions of Linux, host side cdc-ethernet
+	 * won't listen for notifications until its netdevice opens.
+	 * The first notification then sits in the FIFO for a long
+	 * time, and the second one is queued.
+	 */
+	ecm->notify_state = ECM_QC_NOTIFY_CONNECT;
+	ecm_qc_do_notify(ecm);
+}
+
+static void ecm_qc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_ecm_qc			*ecm = req->context;
+	struct usb_composite_dev	*cdev = ecm->port.func.config->cdev;
+	struct usb_cdc_notification	*event = req->buf;
+
+	switch (req->status) {
+	case 0:
+		/* no fault */
+		break;
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		ecm->notify_state = ECM_QC_NOTIFY_NONE;
+		break;
+	default:
+		DBG(cdev, "event %02x --> %d\n",
+			event->bNotificationType, req->status);
+		break;
+	}
+	ecm->notify_req = req;
+	ecm_qc_do_notify(ecm);
+}
+
+static int ecm_qc_setup(struct usb_function *f,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SET_ETHERNET_PACKET_FILTER:
+		/* see 6.2.30: no data, wIndex = interface,
+		 * wValue = packet filter bitmap
+		 */
+		if (w_length != 0 || w_index != ecm->ctrl_id)
+			goto invalid;
+		DBG(cdev, "packet filter %02x\n", w_value);
+		/* REVISIT locking of cdc_filter.  This assumes the UDC
+		 * driver won't have a concurrent packet TX irq running on
+		 * another CPU; or that if it does, this write is atomic...
+		 */
+		ecm->port.cdc_filter = w_value;
+		value = 0;
+		break;
+
+	/* and optionally:
+	 * case USB_CDC_SEND_ENCAPSULATED_COMMAND:
+	 * case USB_CDC_GET_ENCAPSULATED_RESPONSE:
+	 * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS:
+	 * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER:
+	 * case USB_CDC_GET_ETHERNET_STATISTIC:
+	 */
+
+	default:
+invalid:
+		DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("ecm req %02x.%02x response err %d\n",
+					ctrl->bRequestType, ctrl->bRequest,
+					value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+
+static int ecm_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* Control interface has only altsetting 0 */
+	if (intf == ecm->ctrl_id) {
+		if (alt != 0)
+			goto fail;
+
+		if (ecm->notify->driver_data) {
+			VDBG(cdev, "reset ecm control %d\n", intf);
+			usb_ep_disable(ecm->notify);
+		}
+		if (!(ecm->notify->desc)) {
+			VDBG(cdev, "init ecm ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, ecm->notify))
+				goto fail;
+		}
+		usb_ep_enable(ecm->notify);
+		ecm->notify->driver_data = ecm;
+
+	/* Data interface has two altsettings, 0 and 1 */
+	} else if (intf == ecm->data_id) {
+		if (alt > 1)
+			goto fail;
+
+		if (ecm->port.in_ep->driver_data) {
+			DBG(cdev, "reset ecm\n");
+			gether_qc_disconnect(&ecm->port);
+			ecm_qc_bam_disconnect(ecm);
+		}
+
+		if (!ecm->port.in_ep->desc ||
+		    !ecm->port.out_ep->desc) {
+			DBG(cdev, "init ecm\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					       ecm->port.in_ep) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       ecm->port.out_ep)) {
+				ecm->port.in_ep->desc = NULL;
+				ecm->port.out_ep->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* CDC Ethernet only sends data in non-default altsettings.
+		 * Changing altsettings resets filters, statistics, etc.
+		 */
+		if (alt == 1) {
+			struct net_device	*net;
+
+			/* Enable zlps by default for ECM conformance;
+			 * override for musb_hdrc (avoids txdma ovhead).
+			 */
+			ecm->port.is_zlp_ok =
+				!gadget_is_musbhdrc(cdev->gadget);
+			ecm->port.cdc_filter = DEFAULT_FILTER;
+			DBG(cdev, "activate ecm\n");
+			net = gether_qc_connect(&ecm->port);
+			if (IS_ERR(net))
+				return PTR_ERR(net);
+
+			if (ecm_qc_bam_connect(ecm))
+				goto fail;
+		}
+
+		/* NOTE this can be a minor disagreement with the ECM spec,
+		 * which says speed notifications will "always" follow
+		 * connection notifications.  But we allow one connect to
+		 * follow another (if the first is in flight), and instead
+		 * just guarantee that a speed notification is always sent.
+		 */
+		ecm_qc_notify(ecm);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+/* Because the data interface supports multiple altsettings,
+ * this ECM function *MUST* implement a get_alt() method.
+ */
+static int ecm_qc_get_alt(struct usb_function *f, unsigned intf)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+
+	if (intf == ecm->ctrl_id)
+		return 0;
+	return ecm->port.in_ep->driver_data ? 1 : 0;
+}
+
+static void ecm_qc_disable(struct usb_function *f)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+	struct usb_composite_dev *cdev = ecm->port.func.config->cdev;
+
+	DBG(cdev, "ecm deactivated\n");
+
+	if (ecm->port.in_ep->driver_data) {
+		gether_qc_disconnect(&ecm->port);
+		ecm_qc_bam_disconnect(ecm);
+	}
+
+	if (ecm->notify->driver_data) {
+		usb_ep_disable(ecm->notify);
+		ecm->notify->driver_data = NULL;
+		ecm->notify->desc = NULL;
+	}
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Callbacks let us notify the host about connect/disconnect when the
+ * net device is opened or closed.
+ *
+ * For testing, note that link states on this side include both opened
+ * and closed variants of:
+ *
+ *   - disconnected/unconfigured
+ *   - configured but inactive (data alt 0)
+ *   - configured and active (data alt 1)
+ *
+ * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and
+ * SET_INTERFACE (altsetting).  Remember also that "configured" doesn't
+ * imply the host is actually polling the notification endpoint, and
+ * likewise that "active" doesn't imply it's actually using the data
+ * endpoints for traffic.
+ */
+
+static void ecm_qc_open(struct qc_gether *geth)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(&geth->func);
+
+	DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+	ecm->is_open = true;
+	ecm_qc_notify(ecm);
+}
+
+static void ecm_qc_close(struct qc_gether *geth)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(&geth->func);
+
+	DBG(ecm->port.func.config->cdev, "%s\n", __func__);
+
+	ecm->is_open = false;
+	ecm_qc_notify(ecm);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+ecm_qc_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ecm->ctrl_id = status;
+
+	ecm_qc_control_intf.bInterfaceNumber = status;
+	ecm_qc_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ecm->data_id = status;
+
+	ecm_qc_data_nop_intf.bInterfaceNumber = status;
+	ecm_qc_data_intf.bInterfaceNumber = status;
+	ecm_qc_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_in_desc);
+	if (!ep)
+		goto fail;
+
+	ecm->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_out_desc);
+	if (!ep)
+		goto fail;
+
+	ecm->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE:  a status/notification endpoint is *OPTIONAL* but we
+	 * don't treat it that way.  It's simpler, and some newer CDC
+	 * profiles (wireless handsets) no longer treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	ecm->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!ecm->notify_req)
+		goto fail;
+	ecm->notify_req->buf = kmalloc(ECM_QC_STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!ecm->notify_req->buf)
+		goto fail;
+	ecm->notify_req->context = ecm;
+	ecm->notify_req->complete = ecm_qc_notify_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(ecm_qc_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		ecm_qc_hs_in_desc.bEndpointAddress =
+				ecm_qc_fs_in_desc.bEndpointAddress;
+		ecm_qc_hs_out_desc.bEndpointAddress =
+				ecm_qc_fs_out_desc.bEndpointAddress;
+		ecm_qc_hs_notify_desc.bEndpointAddress =
+				ecm_qc_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(ecm_qc_hs_function);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	ecm->port.open = ecm_qc_open;
+	ecm->port.close = ecm_qc_close;
+
+	DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			ecm->port.in_ep->name, ecm->port.out_ep->name,
+			ecm->notify->name);
+	return 0;
+
+fail:
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
+
+	if (ecm->notify_req) {
+		kfree(ecm->notify_req->buf);
+		usb_ep_free_request(ecm->notify, ecm->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (ecm->notify)
+		ecm->notify->driver_data = NULL;
+	if (ecm->port.out_ep)
+		ecm->port.out_ep->driver_data = NULL;
+	if (ecm->port.in_ep)
+		ecm->port.in_ep->driver_data = NULL;
+
+	pr_err("%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+ecm_qc_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_ecm_qc		*ecm = func_to_ecm_qc(f);
+
+	DBG(c->cdev, "ecm unbind\n");
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	kfree(ecm->notify_req->buf);
+	usb_ep_free_request(ecm->notify, ecm->notify_req);
+
+	ecm_qc_string_defs[1].s = NULL;
+	kfree(ecm);
+}
+
+/**
+ * ecm_qc_bind_config - add CDC Ethernet network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host side
+ *	of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_qc_setup() or @gether_qc_setup_name().
+ * Caller is also responsible for calling @gether_qc_cleanup() before
+ * module unload.
+ */
+int
+ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
+{
+	struct f_ecm_qc		*ecm;
+	int		status;
+
+	if (!can_support_ecm(c->cdev->gadget) || !ethaddr)
+		return -EINVAL;
+
+	status = ecm_qc_bam_setup();
+	if (status) {
+		pr_err("bam setup failed\n");
+		return status;
+	}
+
+	/* maybe allocate device-global string IDs */
+	if (ecm_qc_string_defs[0].id == 0) {
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[0].id = status;
+		ecm_qc_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[2].id = status;
+		ecm_qc_data_intf.iInterface = status;
+
+		/* MAC address */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		ecm_qc_string_defs[1].id = status;
+		ecm_qc_desc.iMACAddress = status;
+	}
+
+	/* allocate and initialize one new instance */
+	ecm = kzalloc(sizeof *ecm, GFP_KERNEL);
+	if (!ecm)
+		return -ENOMEM;
+
+	/* export host's Ethernet address in CDC format */
+	snprintf(ecm->ethaddr, sizeof ecm->ethaddr,
+		"%02X%02X%02X%02X%02X%02X",
+		ethaddr[0], ethaddr[1], ethaddr[2],
+		ethaddr[3], ethaddr[4], ethaddr[5]);
+	ecm_qc_string_defs[1].s = ecm->ethaddr;
+
+	ecm->port.cdc_filter = DEFAULT_FILTER;
+
+	ecm->port.func.name = "cdc_ethernet";
+	ecm->port.func.strings = ecm_qc_strings;
+	/* descriptors are per-instance copies */
+	ecm->port.func.bind = ecm_qc_bind;
+	ecm->port.func.unbind = ecm_qc_unbind;
+	ecm->port.func.set_alt = ecm_qc_set_alt;
+	ecm->port.func.get_alt = ecm_qc_get_alt;
+	ecm->port.func.setup = ecm_qc_setup;
+	ecm->port.func.disable = ecm_qc_disable;
+
+	status = usb_add_function(c, &ecm->port.func);
+	if (status) {
+		ecm_qc_string_defs[1].s = NULL;
+		kfree(ecm);
+	}
+	return status;
+}
diff --git a/drivers/usb/gadget/f_tcm.c b/drivers/usb/gadget/f_tcm.c
index d944745..8777504 100644
--- a/drivers/usb/gadget/f_tcm.c
+++ b/drivers/usb/gadget/f_tcm.c
@@ -255,7 +255,6 @@
 {
 	struct f_uas *fu = cmd->fu;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
-	struct usb_gadget *gadget = fuas_to_gadget(fu);
 	int ret;
 
 	init_completion(&cmd->write_complete);
@@ -266,22 +265,6 @@
 		return -EINVAL;
 	}
 
-	if (!gadget->sg_supported) {
-		cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL);
-		if (!cmd->data_buf)
-			return -ENOMEM;
-
-		fu->bot_req_out->buf = cmd->data_buf;
-	} else {
-		fu->bot_req_out->buf = NULL;
-		fu->bot_req_out->num_sgs = se_cmd->t_data_nents;
-		fu->bot_req_out->sg = se_cmd->t_data_sg;
-	}
-
-	fu->bot_req_out->complete = usbg_data_write_cmpl;
-	fu->bot_req_out->length = se_cmd->data_length;
-	fu->bot_req_out->context = cmd;
-
 	ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
 	if (ret)
 		goto cleanup;
diff --git a/drivers/usb/gadget/msm72k_udc.c b/drivers/usb/gadget/msm72k_udc.c
index 297c183..55fd59e 100644
--- a/drivers/usb/gadget/msm72k_udc.c
+++ b/drivers/usb/gadget/msm72k_udc.c
@@ -702,6 +702,14 @@
 
 	spin_lock_irqsave(&ui->lock, flags);
 
+	if (ept->num != 0 && ept->ep.desc == NULL) {
+		req->req.status = -EINVAL;
+		spin_unlock_irqrestore(&ui->lock, flags);
+		dev_err(&ui->pdev->dev,
+			"%s: called for disabled endpoint\n", __func__);
+		return -EINVAL;
+	}
+
 	if (req->busy) {
 		req->req.status = -EBUSY;
 		spin_unlock_irqrestore(&ui->lock, flags);
diff --git a/drivers/usb/gadget/u_bam.c b/drivers/usb/gadget/u_bam.c
index d379c66..1fade88 100644
--- a/drivers/usb/gadget/u_bam.c
+++ b/drivers/usb/gadget/u_bam.c
@@ -24,6 +24,7 @@
 #include <linux/termios.h>
 
 #include <mach/usb_gadget_xport.h>
+#include <linux/usb/msm_hsusb.h>
 #include <mach/usb_bam.h>
 
 #include "u_rmnet.h"
diff --git a/drivers/usb/gadget/u_qc_ether.c b/drivers/usb/gadget/u_qc_ether.c
new file mode 100644
index 0000000..20933b6
--- /dev/null
+++ b/drivers/usb/gadget/u_qc_ether.c
@@ -0,0 +1,409 @@
+/*
+ * u_qc_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "u_ether.h"
+
+
+/*
+ * This component encapsulates the Ethernet link glue needed to provide
+ * one (!) network link through the USB gadget stack, normally "usb0".
+ *
+ * The control and data models are handled by the function driver which
+ * connects to this code; such as CDC Ethernet (ECM or EEM),
+ * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
+ * management.
+ *
+ * Link level addressing is handled by this component using module
+ * parameters; if no such parameters are provided, random link level
+ * addresses are used.  Each end of the link uses one address.  The
+ * host end address is exported in various ways, and is often recorded
+ * in configuration databases.
+ *
+ * The driver which assembles each configuration using such a link is
+ * responsible for ensuring that each configuration includes at most one
+ * instance of this network link.  (The network layer provides ways for
+ * this single "physical" link to be used by multiple virtual links.)
+ *
+ * These utilities are based on the Ethernet-over-USB link layer utilities
+ * and contain the MSM-specific implementation.
+ */
+
+#define UETH__VERSION	"29-May-2008"
+
+struct eth_qc_dev {
+	/* lock is held while accessing port_usb
+	 * or updating its backlink port_usb->ioport
+	 */
+	spinlock_t		lock;
+	struct qc_gether		*port_usb;
+
+	struct net_device	*net;
+	struct usb_gadget	*gadget;
+
+	unsigned		header_len;
+
+	bool			zlp;
+	u8			host_mac[ETH_ALEN];
+};
+
+/*-------------------------------------------------------------------------*/
+
+#undef DBG
+#undef VDBG
+#undef ERROR
+#undef INFO
+
+#define xprintk(d, level, fmt, args...) \
+	printk(level "%s: " fmt , (d)->net->name , ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DBG(dev, fmt, args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDBG	DBG
+#else
+#define VDBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define ERROR(dev, fmt, args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define INFO(dev, fmt, args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+static int ueth_qc_change_mtu(struct net_device *net, int new_mtu)
+{
+	struct eth_qc_dev	*dev = netdev_priv(net);
+	unsigned long	flags;
+	int		status = 0;
+
+	/* don't change MTU on "live" link (peer won't know) */
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb)
+		status = -EBUSY;
+	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+		status = -ERANGE;
+	else
+		net->mtu = new_mtu;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return status;
+}
+
+static void eth_qc_get_drvinfo(struct net_device *net,
+						struct ethtool_drvinfo *p)
+{
+	struct eth_qc_dev	*dev = netdev_priv(net);
+
+	strlcpy(p->driver, "g_qc_ether", sizeof p->driver);
+	strlcpy(p->version, UETH__VERSION, sizeof p->version);
+	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
+	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
+}
+
+static const struct ethtool_ops qc_ethtool_ops = {
+	.get_drvinfo = eth_qc_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+};
+
+static netdev_tx_t eth_qc_start_xmit(struct sk_buff *skb,
+					struct net_device *net)
+{
+	return NETDEV_TX_OK;
+}
+
+static int eth_qc_open(struct net_device *net)
+{
+	struct eth_qc_dev	*dev = netdev_priv(net);
+	struct qc_gether	*link;
+
+	DBG(dev, "%s\n", __func__);
+	if (netif_carrier_ok(dev->net)) {
+		/* Force the netif to send the RTM_NEWLINK event
+		 * that is used to notify user space of the USB cable status.
+		 */
+		netif_carrier_off(dev->net);
+		netif_carrier_on(dev->net);
+		netif_wake_queue(dev->net);
+	}
+
+	spin_lock_irq(&dev->lock);
+	link = dev->port_usb;
+	if (link && link->open)
+		link->open(link);
+	spin_unlock_irq(&dev->lock);
+
+	return 0;
+}
+
+static int eth_qc_stop(struct net_device *net)
+{
+	struct eth_qc_dev	*dev = netdev_priv(net);
+	unsigned long	flags;
+	struct qc_gether	*link = dev->port_usb;
+
+	VDBG(dev, "%s\n", __func__);
+	netif_stop_queue(net);
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb && link->close)
+		link->close(link);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *qc_dev_addr;
+module_param(qc_dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(qc_dev_addr, "QC Device Ethernet Address");
+
+/* this address is invisible to ifconfig */
+static char *qc_host_addr;
+module_param(qc_host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(qc_host_addr, "QC Host Ethernet Address");
+
+static int get_qc_ether_addr(const char *str, u8 *dev_addr)
+{
+	if (str) {
+		unsigned	i;
+
+		for (i = 0; i < 6; i++) {
+			unsigned char num;
+
+			if ((*str == '.') || (*str == ':'))
+				str++;
+			num = hex_to_bin(*str++) << 4;
+			num |= hex_to_bin(*str++);
+			dev_addr[i] = num;
+		}
+		if (is_valid_ether_addr(dev_addr))
+			return 0;
+	}
+	random_ether_addr(dev_addr);
+	return 1;
+}
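A stand-alone illustration (not part of this patch) of the address format the parser above accepts from the qc_dev_addr/qc_host_addr module parameters: six hex bytes, each pair optionally preceded by ':' or '.'. The hex helper stands in for the kernel's hex_to_bin(); the validity check and random fallback are omitted.

/* Illustrative only: mirrors the parsing in get_qc_ether_addr(). */
#include <stdio.h>

static int hex_nibble(char c)	/* stand-in for hex_to_bin() */
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

int main(void)
{
	const char *str = "02:11:22:33:44:55";	/* e.g. a qc_host_addr value */
	unsigned char addr[6];
	int i;

	for (i = 0; i < 6; i++) {
		if (*str == '.' || *str == ':')
			str++;
		addr[i] = hex_nibble(*str++) << 4;
		addr[i] |= hex_nibble(*str++);
	}
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}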
+
+static struct eth_qc_dev *qc_dev;
+
+static const struct net_device_ops eth_qc_netdev_ops = {
+	.ndo_open		= eth_qc_open,
+	.ndo_stop		= eth_qc_stop,
+	.ndo_start_xmit		= eth_qc_start_xmit,
+	.ndo_change_mtu		= ueth_qc_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static struct device_type qc_gadget_type = {
+	.name	= "gadget",
+};
+
+/**
+ * gether_qc_setup - initialize one ethernet-over-usb link
+ * @g: gadget to be associated with this link
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ *	host side of the link is recorded
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework.  The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_qc_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+{
+	return gether_qc_setup_name(g, ethaddr, "usb");
+}
+
+/**
+ * gether_qc_setup_name - initialize one ethernet-over-usb link
+ * @g: gadget to be associated with this link
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ *	host side of the link is recorded
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework.  The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_qc_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+		const char *netname)
+{
+	struct eth_qc_dev		*dev;
+	struct net_device	*net;
+	int			status;
+
+	if (qc_dev)
+		return -EBUSY;
+
+	net = alloc_etherdev(sizeof *dev);
+	if (!net)
+		return -ENOMEM;
+
+	dev = netdev_priv(net);
+	spin_lock_init(&dev->lock);
+
+	/* network device setup */
+	dev->net = net;
+	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+
+	if (get_qc_ether_addr(qc_dev_addr, net->dev_addr))
+		dev_warn(&g->dev,
+			"using random %s ethernet address\n", "self");
+	if (get_qc_ether_addr(qc_host_addr, dev->host_mac))
+		dev_warn(&g->dev,
+			"using random %s ethernet address\n", "host");
+
+	if (ethaddr)
+		memcpy(ethaddr, dev->host_mac, ETH_ALEN);
+
+	net->netdev_ops = &eth_qc_netdev_ops;
+
+	SET_ETHTOOL_OPS(net, &qc_ethtool_ops);
+
+	netif_carrier_off(net);
+
+	dev->gadget = g;
+	SET_NETDEV_DEV(net, &g->dev);
+	SET_NETDEV_DEVTYPE(net, &qc_gadget_type);
+
+	status = register_netdev(net);
+	if (status < 0) {
+		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+		free_netdev(net);
+	} else {
+		INFO(dev, "MAC %pM\n", net->dev_addr);
+		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+		qc_dev = dev;
+	}
+
+	return status;
+}
+
+/**
+ * gether_qc_cleanup - remove Ethernet-over-USB device
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by @gether_qc_setup().
+ */
+void gether_qc_cleanup(void)
+{
+	if (!qc_dev)
+		return;
+
+	unregister_netdev(qc_dev->net);
+	free_netdev(qc_dev->net);
+
+	qc_dev = NULL;
+}
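For context, a hedged sketch of the call sequence a composition driver is expected to follow around these helpers; it mirrors ecm_qc_function_bind_config()/unbind_config() in android.c earlier in this patch. example_ecm_bind_config() is a hypothetical caller and assumes u_qc_ether.h is included.

/* Illustrative only: expected caller sequence, modeled on android.c. */
static int example_ecm_bind_config(struct usb_configuration *c,
				   u8 ethaddr[ETH_ALEN])
{
	int ret;

	/* create the single "ecmX" net_device and record the host MAC */
	ret = gether_qc_setup_name(c->cdev->gadget, ethaddr, "ecm");
	if (ret)
		return ret;

	/* add the CDC ECM function (control + data interfaces) to c */
	ret = ecm_qc_bind_config(c, ethaddr);
	if (ret)
		gether_qc_cleanup();	/* undo the netdev setup on failure */

	return ret;
}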
+
+
+/**
+ * gether_qc_connect - notify network layer that USB link is active
+ * @link: the USB link, set up with endpoints, descriptors matching
+ *	current device speed, and any framing wrapper(s) set up.
+ * Context: irqs blocked
+ *
+ * This is called to let the network layer know the connection
+ * is active ("carrier detect").
+ */
+struct net_device *gether_qc_connect(struct qc_gether *link)
+{
+	struct eth_qc_dev		*dev = qc_dev;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	dev->zlp = link->is_zlp_ok;
+	dev->header_len = link->header_len;
+
+	spin_lock(&dev->lock);
+	dev->port_usb = link;
+	link->ioport = dev;
+	if (netif_running(dev->net)) {
+		if (link->open)
+			link->open(link);
+	} else {
+		if (link->close)
+			link->close(link);
+	}
+	spin_unlock(&dev->lock);
+
+	netif_carrier_on(dev->net);
+	if (netif_running(dev->net))
+		netif_wake_queue(dev->net);
+
+	return dev->net;
+}
+
+/**
+ * gether_qc_disconnect - notify network layer that USB link is inactive
+ * @link: the USB link, on which gether_qc_connect() was called
+ * Context: irqs blocked
+ *
+ * This is called to let the network layer know the connection
+ * went inactive ("no carrier").
+ *
+ * On return, the state is as if gether_qc_connect() had never been called.
+ */
+void gether_qc_disconnect(struct qc_gether *link)
+{
+	struct eth_qc_dev		*dev = link->ioport;
+
+	if (!dev)
+		return;
+
+	DBG(dev, "%s\n", __func__);
+
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+
+	spin_lock(&dev->lock);
+	dev->port_usb = NULL;
+	link->ioport = NULL;
+	spin_unlock(&dev->lock);
+}
diff --git a/drivers/usb/gadget/u_qc_ether.h b/drivers/usb/gadget/u_qc_ether.h
new file mode 100644
index 0000000..b3c281b
--- /dev/null
+++ b/drivers/usb/gadget/u_qc_ether.h
@@ -0,0 +1,97 @@
+/*
+ * u_qc_ether.h -- interface to USB gadget "ethernet link" utilities
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __U_QC_ETHER_H
+#define __U_QC_ETHER_H
+
+#include <linux/err.h>
+#include <linux/if_ether.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+
+#include "gadget_chips.h"
+
+
+/*
+ * This represents the USB side of an "ethernet" link, managed by a USB
+ * function which provides control and (maybe) framing.  Two functions
+ * in different configurations could share the same ethernet link/netdev,
+ * using different host interaction models.
+ *
+ * There is a current limitation that only one instance of this link may
+ * be present in any given configuration.  When that's a problem, network
+ * layer facilities can be used to package multiple logical links on this
+ * single "physical" one.
+ *
+ * This function is based on Ethernet-over-USB link layer utilities and
+ * contains MSM specific implementation.
+ */
+
+struct qc_gether {
+	struct usb_function		func;
+
+	/* updated by gether_{connect,disconnect} */
+	struct eth_qc_dev			*ioport;
+
+	/* endpoints handle full and/or high speeds */
+	struct usb_ep			*in_ep;
+	struct usb_ep			*out_ep;
+
+	bool				is_zlp_ok;
+
+	u16				cdc_filter;
+
+	/* hooks for added framing, as needed for RNDIS and EEM. */
+	u32				header_len;
+	/* NCM requires fixed size bundles */
+	bool				is_fixed;
+	u32				fixed_out_len;
+	u32				fixed_in_len;
+	struct sk_buff			*(*wrap)(struct qc_gether *port,
+						struct sk_buff *skb);
+	int				(*unwrap)(struct qc_gether *port,
+						struct sk_buff *skb,
+						struct sk_buff_head *list);
+
+	/* called on network open/close */
+	void				(*open)(struct qc_gether *);
+	void				(*close)(struct qc_gether *);
+};
+
+/* netdev setup/teardown as directed by the gadget driver */
+int gether_qc_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN]);
+void gether_qc_cleanup(void);
+/* variant of gether_setup that allows customizing network device name */
+int gether_qc_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+		const char *netname);
+
+/* connect/disconnect is handled by individual functions */
+struct net_device *gether_qc_connect(struct qc_gether *);
+void gether_qc_disconnect(struct qc_gether *);
+
+/* each configuration may bind one instance of an ethernet link */
+int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+
+int
+rndis_qc_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+					 u32 vendorID, const char *manufacturer,
+					 u8 maxPktPerXfer);
+#endif /* __U_QC_ETHER_H */
diff --git a/drivers/usb/gadget/u_rmnet_ctrl_smd.c b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
index 0256a75..169008b 100644
--- a/drivers/usb/gadget/u_rmnet_ctrl_smd.c
+++ b/drivers/usb/gadget/u_rmnet_ctrl_smd.c
@@ -326,6 +326,8 @@
 			container_of(w, struct rmnet_ctrl_port, connect_w.work);
 	struct smd_ch_info *c = &port->ctrl_ch;
 	unsigned long flags;
+	int	set_bits = 0;
+	int	clear_bits = 0;
 	int ret;
 
 	pr_debug("%s:\n", __func__);
@@ -348,9 +350,11 @@
 		return;
 	}
 
+	set_bits = c->cbits_tomodem;
+	clear_bits = ~(c->cbits_tomodem | TIOCM_RTS);
 	spin_lock_irqsave(&port->port_lock, flags);
 	if (port->port_usb)
-		smd_tiocmset(c->ch, c->cbits_tomodem, ~c->cbits_tomodem);
+		smd_tiocmset(c->ch, set_bits, clear_bits);
 	spin_unlock_irqrestore(&port->port_lock, flags);
 }
 
diff --git a/drivers/usb/otg/msm_otg.c b/drivers/usb/otg/msm_otg.c
index 487bc59..da96e73 100644
--- a/drivers/usb/otg/msm_otg.c
+++ b/drivers/usb/otg/msm_otg.c
@@ -340,15 +340,28 @@
 {
 	int ret;
 
-	if (IS_ERR(motg->clk))
-		return 0;
-
 	if (assert) {
-		ret = clk_reset(motg->clk, CLK_RESET_ASSERT);
+		if (!IS_ERR(motg->clk)) {
+			ret = clk_reset(motg->clk, CLK_RESET_ASSERT);
+		} else {
+			/* Using asynchronous block reset to the hardware */
+			dev_dbg(motg->phy.dev, "block_reset ASSERT\n");
+			clk_disable_unprepare(motg->pclk);
+			clk_disable_unprepare(motg->core_clk);
+			ret = clk_reset(motg->core_clk, CLK_RESET_ASSERT);
+		}
 		if (ret)
 			dev_err(motg->phy.dev, "usb hs_clk assert failed\n");
 	} else {
-		ret = clk_reset(motg->clk, CLK_RESET_DEASSERT);
+		if (!IS_ERR(motg->clk)) {
+			ret = clk_reset(motg->clk, CLK_RESET_DEASSERT);
+		} else {
+			dev_dbg(motg->phy.dev, "block_reset DEASSERT\n");
+			ret = clk_reset(motg->core_clk, CLK_RESET_DEASSERT);
+			ndelay(200);
+			clk_prepare_enable(motg->core_clk);
+			clk_prepare_enable(motg->pclk);
+		}
 		if (ret)
 			dev_err(motg->phy.dev, "usb hs_clk deassert failed\n");
 	}
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 68500a3..624c36d 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -360,7 +360,7 @@
 
 	usb_mark_last_busy(port->serial->dev);
 
-	if (!status && urb->actual_length) {
+	if ((status == -ENOENT || !status) && urb->actual_length) {
 		spin_lock_irqsave(&portdata->in_lock, flags);
 		list_add_tail(&urb->urb_list, &portdata->in_urb_list);
 		spin_unlock_irqrestore(&portdata->in_lock, flags);
diff --git a/drivers/video/msm/Kconfig b/drivers/video/msm/Kconfig
index 7e078ab..54d7090 100644
--- a/drivers/video/msm/Kconfig
+++ b/drivers/video/msm/Kconfig
@@ -405,6 +405,15 @@
 	select FB_MSM_MIPI_DSI_SIMULATOR
 	default n
 
+config FB_MSM_NO_MDP_PIPE_CTRL
+	depends on FB_MSM_OVERLAY
+	bool "Do not use mdp_pipe_ctrl"
+	---help---
+	  Saying 'Y' here obsoletes the mdp_pipe_ctrl function,
+	  which was used to control mdp-related clocks. MDP4 vsync-driven
+	  screen updates will use a different clock control mechanism if
+	  this is selected.
+
 config FB_MSM_OVERLAY0_WRITEBACK
 	depends on FB_MSM_OVERLAY
         bool "MDP overlay0 write back mode enable"
diff --git a/drivers/video/msm/Makefile b/drivers/video/msm/Makefile
index a0f9e02..e49e2ba 100644
--- a/drivers/video/msm/Makefile
+++ b/drivers/video/msm/Makefile
@@ -14,7 +14,6 @@
 
 ifeq ($(CONFIG_FB_MSM_MDP40),y)
 obj-y += mdp4_util.o
-obj-y += mdp4_hsic.o
 else
 obj-y += mdp_hw_init.o
 obj-y += mdp_ppp.o
diff --git a/drivers/video/msm/external_common.c b/drivers/video/msm/external_common.c
index 9f301fe..46ef7b4 100644
--- a/drivers/video/msm/external_common.c
+++ b/drivers/video/msm/external_common.c
@@ -1981,8 +1981,8 @@
 	struct fb_var_screeninfo *var = &mfd->fbi->var;
 	bool changed = TRUE;
 
-	if (var->reserved[2]) {
-		format = var->reserved[2]-1;
+	if (var->reserved[3]) {
+		format = var->reserved[3]-1;
 		DEV_DBG("reserved format is %d\n", format);
 	} else if (hdmi_prim_resolution) {
 		format = hdmi_prim_resolution - 1;
diff --git a/drivers/video/msm/hdmi_msm.c b/drivers/video/msm/hdmi_msm.c
index 4a1427d..26e5687 100644
--- a/drivers/video/msm/hdmi_msm.c
+++ b/drivers/video/msm/hdmi_msm.c
@@ -828,16 +828,17 @@
 		DEV_INFO("HDMI HPD: QDSP OFF\n");
 		kobject_uevent_env(external_common_state->uevent_kobj,
 				   KOBJ_CHANGE, envp);
-		switch_set_state(&external_common_state->sdev, 0);
-		DEV_INFO("Hdmi state switch to %d: %s\n",
-			external_common_state->sdev.state,  __func__);
 		if (hpd_state) {
 			/* Build EDID table */
 			hdmi_msm_read_edid();
 #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT
 			hdmi_msm_state->reauth = FALSE ;
 #endif
-			DEV_INFO("HDMI HPD: sense CONNECTED: send ONLINE\n");
+			switch_set_state(&external_common_state->sdev, 1);
+			DEV_INFO("Hdmi state switched to %d: %s\n",
+				external_common_state->sdev.state,  __func__);
+
+			DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n");
 			kobject_uevent(external_common_state->uevent_kobj,
 				KOBJ_ONLINE);
 			switch_set_state(&external_common_state->sdev, 1);
@@ -850,18 +851,15 @@
 			DEV_INFO("HDMI HPD: sense : send HDCP_PASS\n");
 			kobject_uevent_env(external_common_state->uevent_kobj,
 				KOBJ_CHANGE, envp);
-			switch_set_state(&external_common_state->sdev, 1);
-			DEV_INFO("Hdmi state switch to %d: %s\n",
-				external_common_state->sdev.state, __func__);
 #endif
 		} else {
-			DEV_INFO("HDMI HPD: sense DISCONNECTED: send OFFLINE\n"
-				);
+			switch_set_state(&external_common_state->sdev, 0);
+			DEV_INFO("Hdmi state switched to %d: %s\n",
+				external_common_state->sdev.state,  __func__);
+
+			DEV_INFO("HDMI HPD: DISCONNECTED: send OFFLINE\n");
 			kobject_uevent(external_common_state->uevent_kobj,
 				KOBJ_OFFLINE);
-			switch_set_state(&external_common_state->sdev, 0);
-			DEV_INFO("Hdmi state switch to %d: %s\n",
-				external_common_state->sdev.state,  __func__);
 		}
 	}
 
@@ -1087,14 +1085,16 @@
 		DEV_INFO("HDCP: AUTH_FAIL_INT received, LINK0_STATUS=0x%08x\n",
 			HDMI_INP_ND(0x011C));
 		if (hdmi_msm_state->full_auth_done) {
+			switch_set_state(&external_common_state->sdev, 0);
+			DEV_INFO("Hdmi state switched to %d: %s\n",
+				external_common_state->sdev.state,  __func__);
+
 			envp[0] = "HDCP_STATE=FAIL";
 			envp[1] = NULL;
 			DEV_INFO("HDMI HPD:QDSP OFF\n");
 			kobject_uevent_env(external_common_state->uevent_kobj,
 			KOBJ_CHANGE, envp);
-			switch_set_state(&external_common_state->sdev, 0);
-			DEV_INFO("Hdmi state switch to %d: %s\n",
-				external_common_state->sdev.state,  __func__);
+
 			mutex_lock(&hdcp_auth_state_mutex);
 			hdmi_msm_state->full_auth_done = FALSE;
 			mutex_unlock(&hdcp_auth_state_mutex);
@@ -2964,9 +2964,7 @@
 	char *envp[2];
 
 	if (!hdmi_msm_has_hdcp()) {
-		switch_set_state(&external_common_state->sdev, 1);
-		DEV_INFO("Hdmi state switch to %d: %s\n",
-			external_common_state->sdev.state, __func__);
+		DEV_INFO("%s: HDCP NOT ENABLED\n", __func__);
 		return;
 	}
 
@@ -3039,8 +3037,9 @@
 		kobject_uevent_env(external_common_state->uevent_kobj,
 		    KOBJ_CHANGE, envp);
 	}
+
 	switch_set_state(&external_common_state->sdev, 1);
-	DEV_INFO("Hdmi state switch to %d: %s\n",
+	DEV_INFO("Hdmi state switched to %d: %s\n",
 		external_common_state->sdev.state, __func__);
 	return;
 
@@ -3062,7 +3061,7 @@
 			    &hdmi_msm_state->hdcp_reauth_work);
 	}
 	switch_set_state(&external_common_state->sdev, 0);
-	DEV_INFO("Hdmi state switch to %d: %s\n",
+	DEV_INFO("Hdmi state switched to %d: %s\n",
 		external_common_state->sdev.state, __func__);
 }
 #endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL_HDCP_SUPPORT */
@@ -4381,7 +4380,7 @@
 	hdmi_msm_state->hpd_cable_chg_detected = FALSE;
 	/* QDSP OFF preceding the HPD event notification */
 	switch_set_state(&external_common_state->sdev, 0);
-	DEV_INFO("Hdmi state switch to %d: %s\n",
+	DEV_INFO("Hdmi state switched to %d: %s\n",
 		 external_common_state->sdev.state,  __func__);
 	if (on) {
 		hdmi_msm_read_edid();
@@ -4389,7 +4388,7 @@
 			hdmi_msm_state->reauth = FALSE ;
 		/* Build EDID table */
 		hdmi_msm_turn_on();
-		DEV_INFO("HDMI HPD: sense CONNECTED: send ONLINE\n");
+		DEV_INFO("HDMI HPD: CONNECTED: send ONLINE\n");
 		kobject_uevent(external_common_state->uevent_kobj,
 			       KOBJ_ONLINE);
 		hdmi_msm_hdcp_enable();
@@ -4402,16 +4401,15 @@
 			kobject_uevent_env(external_common_state->uevent_kobj,
 					   KOBJ_CHANGE, envp);
 			switch_set_state(&external_common_state->sdev, 1);
-			DEV_INFO("Hdmi state switch to %d: %s\n",
+			DEV_INFO("Hdmi state switched to %d: %s\n",
 				 external_common_state->sdev.state, __func__);
 		}
 	} else {
-		DEV_INFO("HDMI HPD: sense DISCONNECTED: send OFFLINE\n"
-			);
+		DEV_INFO("HDMI HPD: DISCONNECTED: send OFFLINE\n");
 		kobject_uevent(external_common_state->uevent_kobj,
 			       KOBJ_OFFLINE);
 		switch_set_state(&external_common_state->sdev, 0);
-		DEV_INFO("Hdmi state switch to %d: %s\n",
+		DEV_INFO("Hdmi state switched to %d: %s\n",
 			 external_common_state->sdev.state,  __func__);
 	}
 }
diff --git a/drivers/video/msm/lvds.c b/drivers/video/msm/lvds.c
index f5d8201..2987e2f 100644
--- a/drivers/video/msm/lvds.c
+++ b/drivers/video/msm/lvds.c
@@ -92,11 +92,11 @@
 		MDP_OUTP(MDP_BASE + 0xc3064, 0x05);
 		MDP_OUTP(MDP_BASE + 0xc3050, 0x20);
 	} else {
-		MDP_OUTP(MDP_BASE + 0xc3004, 0x62);
+		MDP_OUTP(MDP_BASE + 0xc3004, 0x8f);
 		MDP_OUTP(MDP_BASE + 0xc3008, 0x30);
-		MDP_OUTP(MDP_BASE + 0xc300c, 0xc4);
+		MDP_OUTP(MDP_BASE + 0xc300c, 0xc6);
 		MDP_OUTP(MDP_BASE + 0xc3014, 0x10);
-		MDP_OUTP(MDP_BASE + 0xc3018, 0x05);
+		MDP_OUTP(MDP_BASE + 0xc3018, 0x07);
 		MDP_OUTP(MDP_BASE + 0xc301c, 0x62);
 		MDP_OUTP(MDP_BASE + 0xc3020, 0x41);
 		MDP_OUTP(MDP_BASE + 0xc3024, 0x0d);
diff --git a/drivers/video/msm/lvds_chimei_wxga.c b/drivers/video/msm/lvds_chimei_wxga.c
index 9a385b9..39aa852 100644
--- a/drivers/video/msm/lvds_chimei_wxga.c
+++ b/drivers/video/msm/lvds_chimei_wxga.c
@@ -134,7 +134,7 @@
 	pinfo->wait_cycle = 0;
 	pinfo->bpp = 24;
 	pinfo->fb_num = 2;
-	pinfo->clk_rate = 75000000;
+	pinfo->clk_rate = 79400000;
 	pinfo->bl_max = 255;
 	pinfo->bl_min = 1;
 
diff --git a/drivers/video/msm/mdp.c b/drivers/video/msm/mdp.c
index c9be60a..bfaed8d8 100644
--- a/drivers/video/msm/mdp.c
+++ b/drivers/video/msm/mdp.c
@@ -1450,7 +1450,6 @@
 #endif
 }
 
-static int mdp_clk_rate;
 static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST];
 static int pdev_list_cnt;
 
@@ -1458,6 +1457,68 @@
 {
 	mdp_pipe_ctrl(MDP_MASTER_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 }
+
+static int mdp_clk_rate;
+
+#ifdef CONFIG_FB_MSM_NO_MDP_PIPE_CTRL
+
+static void mdp_clk_off(void)
+{
+	mb();
+	vsync_clk_disable();
+
+	if (mdp_clk != NULL)
+		clk_disable_unprepare(mdp_clk);
+
+	if (mdp_pclk != NULL)
+		clk_disable_unprepare(mdp_pclk);
+
+	if (mdp_lut_clk != NULL)
+		clk_disable_unprepare(mdp_lut_clk);
+}
+
+static void mdp_clk_on(void)
+{
+	if (mdp_clk != NULL)
+		clk_prepare_enable(mdp_clk);
+
+	if (mdp_pclk != NULL)
+		clk_prepare_enable(mdp_pclk);
+
+	if (mdp_lut_clk != NULL)
+		clk_prepare_enable(mdp_lut_clk);
+
+	vsync_clk_enable();
+}
+
+void mdp_clk_ctrl(int on)
+{
+	static int mdp_clk_cnt;
+
+	mutex_lock(&mdp_suspend_mutex);
+	if (on) {
+		if (mdp_clk_cnt == 0)
+			mdp_clk_on();
+		mdp_clk_cnt++;
+	} else {
+		if (mdp_clk_cnt) {
+			mdp_clk_cnt--;
+			if (mdp_clk_cnt == 0)
+				mdp_clk_off();
+		}
+	}
+	mutex_unlock(&mdp_suspend_mutex);
+}
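mdp_clk_ctrl() is reference counted: the MDP clocks are switched on only on the zero-to-one transition and gated again only when the count drops back to zero, so enable and disable calls must stay balanced. A hedged sketch of the expected usage around register access (mdp_probe() below follows the same pattern); the caller is hypothetical and assumes mdp.h declares mdp_clk_ctrl().

/* Illustrative only: balanced clock votes around MDP register access. */
static void example_mdp_register_access(void)
{
	mdp_clk_ctrl(1);	/* vote the MDP clocks on (count 0 -> 1) */

	/* ... MDP register reads/writes are safe here ... */

	mdp_clk_ctrl(0);	/* drop the vote; clocks gate at count 0 */
}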
+
+void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
+		   boolean isr)
+{
+	/* do nothing */
+}
+#else
 void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
 		   boolean isr)
 {
@@ -1620,6 +1681,12 @@
 	}
 }
 
+void mdp_clk_ctrl(int on)
+{
+	/* do nothing */
+}
+#endif
+
 void mdp_histogram_handle_isr(struct mdp_hist_mgmt *mgmt)
 {
 	uint32 isr, mask;
@@ -1933,6 +2000,15 @@
 
 	mdp_histogram_ctrl_all(FALSE);
 
+	if (mfd->panel.type == MIPI_CMD_PANEL)
+		mdp4_dsi_cmd_off(pdev);
+	else if (mfd->panel.type == MIPI_VIDEO_PANEL)
+		mdp4_dsi_video_off(pdev);
+	else if (mfd->panel.type == HDMI_PANEL ||
+			mfd->panel.type == LCDC_PANEL ||
+			mfd->panel.type == LVDS_PANEL)
+		mdp4_lcdc_off(pdev);
+
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 	ret = panel_next_off(pdev);
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
@@ -1945,18 +2021,25 @@
 static int mdp_on(struct platform_device *pdev)
 {
 	int ret = 0;
-
 #ifdef CONFIG_FB_MSM_MDP40
 	struct msm_fb_data_type *mfd;
 
 	mfd = platform_get_drvdata(pdev);
-
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-	if (is_mdp4_hw_reset()) {
+	mdp_clk_ctrl(1);
+	mdp4_hw_init();
+	outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
+	if (mfd->panel.type == MIPI_CMD_PANEL) {
 		mdp_vsync_cfg_regs(mfd, FALSE);
-		mdp4_hw_init();
-		outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
-	}
+		mdp4_dsi_cmd_on(pdev);
+	} else if (mfd->panel.type == MIPI_VIDEO_PANEL)
+		mdp4_dsi_video_on(pdev);
+	else if (mfd->panel.type == HDMI_PANEL ||
+			mfd->panel.type == LCDC_PANEL ||
+			mfd->panel.type == LVDS_PANEL)
+		mdp4_lcdc_on(pdev);
+
+	mdp_clk_ctrl(0);
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 #endif
 
@@ -1964,13 +2047,6 @@
 	ret = panel_next_on(pdev);
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
-#ifdef CONFIG_FB_MSM_MDP40
-	if (mfd->panel.type == MIPI_CMD_PANEL)
-		mdp4_dsi_cmd_overlay_restore();
-	else if (mfd->panel.type == MDDI_PANEL)
-		mdp4_mddi_overlay_restore();
-#endif
-
 	mdp_histogram_ctrl_all(TRUE);
 
 	return ret;
@@ -2236,6 +2312,8 @@
 		if (rc)
 			return rc;
 
+		mdp_clk_ctrl(1);
+
 		mdp_hw_version();
 
 		/* initializing mdp hw */
@@ -2249,6 +2327,8 @@
 #ifdef CONFIG_FB_MSM_OVERLAY
 		mdp_hw_cursor_init();
 #endif
+		mdp_clk_ctrl(0);
+
 		mdp_resource_initialized = 1;
 		return 0;
 	}
@@ -2323,6 +2403,8 @@
 	pdata->off = mdp_off;
 	pdata->next = pdev;
 
+	mdp_clk_ctrl(1);
+
 	mdp_prim_panel_type = mfd->panel.type;
 	switch (mfd->panel.type) {
 	case EXT_MDDI_PANEL:
@@ -2400,8 +2482,7 @@
 #ifndef CONFIG_FB_MSM_MDP303
 		mipi = &mfd->panel_info.mipi;
 		configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 23 / 20);
-		pdata->on = mdp4_dsi_video_on;
-		pdata->off = mdp4_dsi_video_off;
+		mdp4_dsi_vsync_init(0);
 		mfd->hw_refresh = TRUE;
 		mfd->dma_fnc = mdp4_dsi_video_overlay;
 		mfd->lut_update = mdp_lut_update_lcdc;
@@ -2444,6 +2525,7 @@
 		mfd->dma_fnc = mdp4_dsi_cmd_overlay;
 		mipi = &mfd->panel_info.mipi;
 		configure_mdp_core_clk_table((mipi->dsi_pclk_rate) * 3 / 2);
+		mdp4_dsi_rdptr_init(0);
 		if (mfd->panel_info.pdest == DISPLAY_1) {
 			if_no = PRIMARY_INTF_SEL;
 			mfd->dma = &dma2_data;
@@ -2464,6 +2546,7 @@
 		spin_unlock_irqrestore(&mdp_spin_lock, flag);
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 #else
+
 		mfd->dma_fnc = mdp_dma2_update;
 		mfd->do_histogram = mdp_do_histogram;
 		mfd->start_histogram = mdp_histogram_start;
@@ -2484,6 +2567,7 @@
 
 #ifdef CONFIG_FB_MSM_DTV
 	case DTV_PANEL:
+		mdp4_dtv_vsync_init(0);
 		pdata->on = mdp4_dtv_on;
 		pdata->off = mdp4_dtv_off;
 		mfd->hw_refresh = TRUE;
@@ -2499,8 +2583,10 @@
 	case HDMI_PANEL:
 	case LCDC_PANEL:
 	case LVDS_PANEL:
+#ifdef CONFIG_FB_MSM_MDP303
 		pdata->on = mdp_lcdc_on;
 		pdata->off = mdp_lcdc_off;
+#endif
 		mfd->hw_refresh = TRUE;
 #if	defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDP40)
 		mfd->cursor_update = mdp_hw_cursor_sync_update;
@@ -2520,6 +2606,7 @@
 #endif
 
 #ifdef CONFIG_FB_MSM_MDP40
+		mdp4_lcdc_vsync_init(0);
 		configure_mdp_core_clk_table((mfd->panel_info.clk_rate)
 								* 23 / 20);
 		if (mfd->panel.type == HDMI_PANEL) {
@@ -2580,6 +2667,7 @@
 	default:
 		printk(KERN_ERR "mdp_probe: unknown device type!\n");
 		rc = -ENODEV;
+		mdp_clk_ctrl(0);
 		goto mdp_probe_err;
 	}
 #ifdef CONFIG_FB_MSM_MDP40
@@ -2588,6 +2676,8 @@
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 #endif
 
+	mdp_clk_ctrl(0);
+
 #ifdef CONFIG_MSM_BUS_SCALING
 	if (!mdp_bus_scale_handle && mdp_pdata &&
 		mdp_pdata->mdp_bus_scale_table) {
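
Note on the mdp.c changes: the mdp_clk_ctrl() introduced above gates the MDP core, pclk and LUT clocks behind a simple reference count, so nested callers (probe, panel on/off, overlay paths) can bracket register access without tracking global clock state. A minimal standalone sketch of the same pattern follows; the names are illustrative and not part of the patch:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(clk_lock);
	static int clk_cnt;

	/* stand-ins for the patch's mdp_clk_on()/mdp_clk_off() */
	static void hw_clk_on(void)  { /* clk_prepare_enable(...) calls */ }
	static void hw_clk_off(void) { /* clk_disable_unprepare(...) calls */ }

	void example_clk_ctrl(int on)
	{
		mutex_lock(&clk_lock);
		if (on) {
			if (clk_cnt++ == 0)	/* first user powers the clocks on */
				hw_clk_on();
		} else if (clk_cnt && --clk_cnt == 0) {
			hw_clk_off();		/* last user powers them off */
		}
		mutex_unlock(&clk_lock);
	}

Each mdp_clk_ctrl(1) added in the probe and panel-on paths above is balanced by an mdp_clk_ctrl(0), including the error path in mdp_probe().
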
diff --git a/drivers/video/msm/mdp.h b/drivers/video/msm/mdp.h
index 184c5ce..2411dca 100644
--- a/drivers/video/msm/mdp.h
+++ b/drivers/video/msm/mdp.h
@@ -277,12 +277,16 @@
 #ifdef CONFIG_FB_MSM_MDP40
 #define MDP_OVERLAY0_TERM 0x20
 #define MDP_OVERLAY1_TERM 0x40
+#define MDP_DMAP_TERM MDP_DMA2_TERM	/* dmap == dma2 */
+#define MDP_PRIM_VSYNC_TERM 0x100
+#define MDP_EXTER_VSYNC_TERM 0x200
+#define MDP_PRIM_RDPTR_TERM 0x400
 #endif
 #define MDP_OVERLAY2_TERM 0x80
-#define MDP_HISTOGRAM_TERM_DMA_P 0x100
-#define MDP_HISTOGRAM_TERM_DMA_S 0x200
-#define MDP_HISTOGRAM_TERM_VG_1 0x400
-#define MDP_HISTOGRAM_TERM_VG_2 0x800
+#define MDP_HISTOGRAM_TERM_DMA_P 0x10000
+#define MDP_HISTOGRAM_TERM_DMA_S 0x20000
+#define MDP_HISTOGRAM_TERM_VG_1 0x40000
+#define MDP_HISTOGRAM_TERM_VG_2 0x80000
 
 #define ACTIVE_START_X_EN BIT(31)
 #define ACTIVE_START_Y_EN BIT(31)
@@ -704,6 +708,7 @@
 void mdp_hw_init(void);
 int mdp_ppp_pipe_wait(void);
 void mdp_pipe_kickoff(uint32 term, struct msm_fb_data_type *mfd);
+void mdp_clk_ctrl(int on);
 void mdp_pipe_ctrl(MDP_BLOCK_TYPE block, MDP_BLOCK_POWER_STATE state,
 		   boolean isr);
 void mdp_set_dma_pan_info(struct fb_info *info, struct mdp_dirty_region *dirty,
@@ -751,6 +756,30 @@
 int mdp_dsi_video_off(struct platform_device *pdev);
 void mdp_dsi_video_update(struct msm_fb_data_type *mfd);
 void mdp3_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd);
+static inline int mdp4_dsi_cmd_off(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline int mdp4_dsi_video_off(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline int mdp4_lcdc_off(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline int mdp4_dsi_cmd_on(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline int mdp4_dsi_video_on(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline int mdp4_lcdc_on(struct platform_device *pdev)
+{
+	return 0;
+}
 #endif
 
 void set_cont_splashScreen_status(int);
@@ -785,6 +814,8 @@
 #endif
 
 #ifdef MDP_HW_VSYNC
+void vsync_clk_enable(void);
+void vsync_clk_disable(void);
 void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd);
 void mdp_hw_vsync_clk_disable(struct msm_fb_data_type *mfd);
 void mdp_vsync_clk_disable(void);
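
Note on the mdp.h changes: several hunks above provide no-op static inline stubs when the corresponding interface is compiled out, so callers such as mdp_on()/mdp_off() can invoke mdp4_dsi_video_on() and friends unconditionally. A generic sketch of that pattern, with placeholder names (CONFIG_FEATURE_X, feature_x_on) that are not in the tree:

	/* header: real declaration when the feature is built, no-op stub otherwise */
	struct platform_device;

	#ifdef CONFIG_FEATURE_X
	int feature_x_on(struct platform_device *pdev);
	#else
	static inline int feature_x_on(struct platform_device *pdev)
	{
		return 0;	/* feature compiled out: report success, do nothing */
	}
	#endif

This keeps the #ifdef noise in one header instead of at every call site.
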
diff --git a/drivers/video/msm/mdp4.h b/drivers/video/msm/mdp4.h
index c9bdf27..72e7c8f 100644
--- a/drivers/video/msm/mdp4.h
+++ b/drivers/video/msm/mdp4.h
@@ -34,6 +34,7 @@
 
 #define MDP4_VIDEO_BASE 0x20000
 #define MDP4_VIDEO_OFF 0x10000
+#define MDP4_VIDEO_CSC_OFF 0x4000
 
 #define MDP4_RGB_BASE 0x40000
 #define MDP4_RGB_OFF 0x10000
@@ -120,7 +121,7 @@
 #define INTR_PRIMARY_INTF_UDERRUN	BIT(8)
 #define INTR_EXTERNAL_VSYNC		BIT(9)
 #define INTR_EXTERNAL_INTF_UDERRUN	BIT(10)
-#define INTR_PRIMARY_READ_PTR		BIT(11)
+#define INTR_PRIMARY_RDPTR		BIT(11)	/* read pointer */
 #define INTR_DMA_P_HISTOGRAM		BIT(17)
 #define INTR_DMA_S_HISTOGRAM		BIT(26)
 #define INTR_OVERLAY2_DONE		BIT(30)
@@ -219,6 +220,7 @@
 #define MDP4_OP_FLIP_UD		BIT(14)
 #define MDP4_OP_FLIP_LR		BIT(13)
 #define MDP4_OP_CSC_EN		BIT(11)
+#define MDP4_OP_DST_DATA_YCBCR	BIT(10)
 #define MDP4_OP_SRC_DATA_YCBCR	BIT(9)
 #define MDP4_OP_SCALEY_FIR 		(0 << 4)
 #define MDP4_OP_SCALEY_MN_PHASE 	(1 << 4)
@@ -258,6 +260,14 @@
 	u8 mark_unmap;
 };
 
+#define IOMMU_FREE_LIST_MAX 32
+
+struct iommu_free_list {
+	int total;
+	int fndx;
+	struct ion_handle *ihdl[IOMMU_FREE_LIST_MAX];
+};
+
 struct blend_cfg {
 	u32 op;
 	u32 bg_alpha;
@@ -337,20 +347,26 @@
 	uint32 element2; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
 	uint32 element1; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
 	uint32 element0; /* 0 = C0, 1 = C1, 2 = C2, 3 = C3 */
-	struct completion comp;
 	ulong ov_blt_addr; /* blt mode addr */
 	ulong dma_blt_addr; /* blt mode addr */
 	ulong blt_base;
 	ulong blt_offset;
 	uint32 blt_cnt;
+	uint32 blt_changed;
 	uint32 ov_cnt;
 	uint32 dmap_cnt;
 	uint32 dmae_cnt;
 	uint32 blt_end;
+	uint32 blt_ov_koff;
+	uint32 blt_ov_done;
+	uint32 blt_dmap_koff;
+	uint32 blt_dmap_done;
 	uint32 luma_align_size;
-	struct mdp4_hsic_regs hsic_regs;
-	struct completion dmas_comp;
+	struct mdp_overlay_pp_params pp_cfg;
 	struct mdp_overlay req_data;
+	struct completion comp;
+	struct completion dmas_comp;
+	struct mdp4_iommu_pipe_info iommu;
 };
 
 struct mdp4_statistic {
@@ -366,7 +382,7 @@
 	ulong intr_vsync_e;	/* external interface */
 	ulong intr_underrun_e;	/* external interface */
 	ulong intr_histogram;
-	ulong intr_rd_ptr;
+	ulong intr_rdptr;
 	ulong dsi_mdp_start;
 	ulong dsi_clk_on;
 	ulong dsi_clk_off;
@@ -387,7 +403,13 @@
 	ulong overlay_set[MDP4_MIXER_MAX];
 	ulong overlay_unset[MDP4_MIXER_MAX];
 	ulong overlay_play[MDP4_MIXER_MAX];
+	ulong overlay_commit[MDP4_MIXER_MAX];
 	ulong pipe[OVERLAY_PIPE_MAX];
+	ulong wait4vsync0;
+	ulong wait4vsync1;
+	ulong iommu_map;
+	ulong iommu_unmap;
+	ulong iommu_drop;
 	ulong dsi_clkoff;
 	ulong err_mixer;
 	ulong err_zorder;
@@ -399,6 +421,12 @@
 	ulong err_underflow;
 };
 
+struct vsync_update {
+	int update_cnt;	/* pipes to be updated */
+	struct completion vsync_comp;
+	struct mdp4_overlay_pipe plist[OVERLAY_PIPE_MAX];
+};
+
 struct mdp4_overlay_pipe *mdp4_overlay_ndx2pipe(int ndx);
 void mdp4_sw_reset(unsigned long bits);
 void mdp4_display_intf_sel(int output, unsigned long intf);
@@ -434,8 +462,10 @@
 uint32 mdp4_overlay_format(struct mdp4_overlay_pipe *pipe);
 uint32 mdp4_overlay_unpack_pattern(struct mdp4_overlay_pipe *pipe);
 uint32 mdp4_overlay_op_mode(struct mdp4_overlay_pipe *pipe);
-void mdp4_lcdc_base_swap(struct mdp4_overlay_pipe *pipe);
+void mdp4_lcdc_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
 void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd);
+
+
 #ifdef CONFIG_FB_MSM_DTV
 void mdp4_overlay_dtv_start(void);
 void mdp4_overlay_dtv_ov_done_push(struct msm_fb_data_type *mfd,
@@ -446,9 +476,10 @@
 			struct mdp4_overlay_pipe *pipe);
 int mdp4_overlay_dtv_unset(struct msm_fb_data_type *mfd,
 			struct mdp4_overlay_pipe *pipe);
-void mdp4_dma_e_done_dtv(void);
-void mdp4_overlay_dtv_wait4vsync(void);
-void mdp4_dtv_base_swap(struct mdp4_overlay_pipe *pipe);
+void mdp4_dmae_done_dtv(void);
+void mdp4_dtv_wait4vsync(int cndx, long long *vtime);
+void mdp4_dtv_vsync_ctrl(int cndx, int enable);
+void mdp4_dtv_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
 #else
 static inline void mdp4_overlay_dtv_start(void)
 {
@@ -475,11 +506,15 @@
 	return 0;
 }
 
-static inline void mdp4_dma_e_done_dtv(void)
+static inline void mdp4_dmae_done_dtv(void)
 {
     /* empty */
 }
-static inline void mdp4_overlay_dtv_wait4vsync(void)
+static inline void mdp4_dtv_wait4vsync(int cndx, long long *vtime)
+{
+    /* empty */
+}
+static inline void mdp4_dtv_vsync_ctrl(int cndx, long long *vtime)
 {
     /* empty */
 }
@@ -495,19 +530,10 @@
 {
 	/* empty */
 }
-#endif
+#endif /* CONFIG_FB_MSM_DTV */
 
 void mdp4_dtv_set_black_screen(void);
 
-static inline int mdp4_overlay_borderfill_supported(void)
-{
-	unsigned int mdp_hw_version;
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-	mdp_hw_version = inpdw(MDP_BASE + 0x0); /* MDP_HW_VERSION */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-	return (mdp_hw_version >= 0x0402030b);
-}
-
 int mdp4_overlay_dtv_set(struct msm_fb_data_type *mfd,
 			struct mdp4_overlay_pipe *pipe);
 int mdp4_overlay_dtv_unset(struct msm_fb_data_type *mfd,
@@ -520,11 +546,20 @@
 int mdp4_atv_off(struct platform_device *pdev);
 void mdp4_dsi_video_fxn_register(cmd_fxn_t fxn);
 void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd);
-int mdp4_dsi_video_on(struct platform_device *pdev);
-int mdp4_dsi_video_off(struct platform_device *pdev);
-void mdp4_overlay0_done_dsi_video(struct mdp_dma_data *dma);
-void mdp4_overlay0_done_dsi_cmd(struct mdp_dma_data *dma);
+void mdp4_lcdc_vsync_ctrl(int cndx, int enable);
+void mdp4_overlay0_done_dsi_video(int cndx);
+void mdp4_overlay0_done_dsi_cmd(int cndx);
+void mdp4_primary_rdptr(void);
 void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd);
+int mdp4_overlay_commit(struct fb_info *info, int mixer);
+int mdp4_dsi_video_pipe_commit(void);
+int mdp4_dsi_cmd_pipe_commit(void);
+int mdp4_lcdc_pipe_commit(void);
+int mdp4_dtv_pipe_commit(void);
+void mdp4_dsi_rdptr_init(int cndx);
+void mdp4_dsi_vsync_init(int cndx);
+void mdp4_lcdc_vsync_init(int cndx);
+void mdp4_dtv_vsync_init(int cndx);
 void mdp4_overlay_dsi_state_set(int state);
 int mdp4_overlay_dsi_state_get(void);
 void mdp4_overlay_rgb_setup(struct mdp4_overlay_pipe *pipe);
@@ -541,12 +576,20 @@
 int mdp4_overlay_format2pipe(struct mdp4_overlay_pipe *pipe);
 int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req);
 int mdp4_overlay_set(struct fb_info *info, struct mdp_overlay *req);
+int mdp4_overlay_wait4vsync(struct fb_info *info, long long *vtime);
+int mdp4_overlay_vsync_ctrl(struct fb_info *info, int enable);
 int mdp4_overlay_unset(struct fb_info *info, int ndx);
 int mdp4_overlay_unset_mixer(int mixer);
 int mdp4_overlay_play_wait(struct fb_info *info,
 	struct msmfb_overlay_data *req);
 int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req);
 struct mdp4_overlay_pipe *mdp4_overlay_pipe_alloc(int ptype, int mixer);
+void mdp4_overlay_dma_commit(int mixer);
+void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe);
+void mdp4_mixer_stage_commit(int mixer);
+void mdp4_dsi_cmd_do_update(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_lcdc_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dtv_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
 void mdp4_overlay_pipe_free(struct mdp4_overlay_pipe *pipe);
 void mdp4_overlay_dmap_cfg(struct msm_fb_data_type *mfd, int lcdc);
 void mdp4_overlay_dmap_xy(struct mdp4_overlay_pipe *pipe);
@@ -554,21 +597,20 @@
 void mdp4_overlay_dmae_xy(struct mdp4_overlay_pipe *pipe);
 int mdp4_overlay_pipe_staged(int mixer);
 void mdp4_lcdc_primary_vsyn(void);
-void mdp4_overlay0_done_lcdc(struct mdp_dma_data *dma);
+void mdp4_overlay0_done_lcdc(int cndx);
 void mdp4_overlay0_done_mddi(struct mdp_dma_data *dma);
-void mdp4_dma_s_done_mddi(void);
 void mdp4_dma_p_done_mddi(struct mdp_dma_data *dma);
-void mdp4_dma_p_done_dsi(struct mdp_dma_data *dma);
-void mdp4_dma_p_done_dsi_video(struct mdp_dma_data *dma);
-void mdp4_dma_p_done_lcdc(void);
+void mdp4_dmap_done_dsi_cmd(int cndx);
+void mdp4_dmap_done_dsi_video(int cndx);
+void mdp4_dmap_done_lcdc(int cndx);
 void mdp4_overlay1_done_dtv(void);
 void mdp4_overlay1_done_atv(void);
 void mdp4_primary_vsync_lcdc(void);
 void mdp4_external_vsync_dtv(void);
-void mdp4_overlay_lcdc_wait4vsync(struct msm_fb_data_type *mfd);
-void mdp4_overlay_lcdc_start(void);
+void mdp4_lcdc_wait4vsync(int cndx, long long *vtime);
 void mdp4_overlay_lcdc_vsync_push(struct msm_fb_data_type *mfd,
 				struct mdp4_overlay_pipe *pipe);
+void mdp4_overlay_dtv_set_perf(struct msm_fb_data_type *mfd);
 void mdp4_update_perf_level(u32 perf_level);
 void mdp4_set_perf_level(void);
 void mdp4_mddi_overlay_dmas_restore(void);
@@ -577,6 +619,11 @@
 void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd);
 void mdp4_mddi_overlay_restore(void);
 #else
+static inline void mdp4_mddi_kickoff_video(struct msm_fb_data_type *mfd,
+				struct mdp4_overlay_pipe *pipe)
+{
+	/* empty */
+}
 static inline void mdp4_mddi_dma_busy_wait(struct msm_fb_data_type *mfd)
 {
 	/* empty */
@@ -636,7 +683,7 @@
 					struct msmfb_overlay_blt *req);
 int mdp4_dsi_video_overlay_blt_offset(struct msm_fb_data_type *mfd,
 					struct msmfb_overlay_blt *req);
-void mdp4_dsi_video_base_swap(struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_video_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
 
 #ifdef CONFIG_FB_MSM_MDP40
 static inline void mdp3_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
@@ -644,7 +691,7 @@
 	/* empty */
 }
 #endif
-#else
+#else     /* CONFIG_FB_MSM_MIPI_DSI */
 int mdp4_mddi_overlay_blt_offset(struct msm_fb_data_type *mfd,
 					struct msmfb_overlay_blt *req);
 void mdp4_mddi_overlay_blt(struct msm_fb_data_type *mfd,
@@ -684,11 +731,12 @@
 {
 	return -ENODEV;
 }
-static inline void mdp4_dsi_video_base_swap(struct mdp4_overlay_pipe *pipe)
+static inline void mdp4_dsi_video_base_swap(int cndx,
+			struct mdp4_overlay_pipe *pipe)
 {
 	/* empty */
 }
-#endif
+#endif  /* CONFIG_FB_MSM_MIPI_DSI */
 
 void mdp4_lcdc_overlay_blt(struct msm_fb_data_type *mfd,
 					struct msmfb_overlay_blt *req);
@@ -712,51 +760,87 @@
 
 void mdp4_dsi_cmd_dma_busy_check(void);
 
+
+
 #ifdef CONFIG_FB_MSM_MIPI_DSI
-void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd);
-void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd);
-void mdp4_overlay_dsi_video_start(void);
-void mdp4_overlay_dsi_video_vsync_push(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe);
-void mdp4_dsi_cmd_overlay_restore(void);
 void mdp_dsi_cmd_overlay_suspend(struct msm_fb_data_type *mfd);
+int mdp4_dsi_cmd_on(struct platform_device *pdev);
+int mdp4_dsi_cmd_off(struct platform_device *pdev);
+int mdp4_dsi_video_off(struct platform_device *pdev);
+int mdp4_dsi_video_on(struct platform_device *pdev);
+void mdp4_primary_vsync_dsi_video(void);
+void mdp4_dsi_cmd_base_swap(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime);
+void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime);
+void mdp4_dsi_cmd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_video_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe);
+void mdp4_dsi_cmd_vsync_ctrl(int cndx, int enable);
+void mdp4_dsi_video_vsync_ctrl(int cndx, int enable);
 #ifdef CONFIG_FB_MSM_MDP303
 static inline void mdp4_dsi_cmd_del_timer(void)
 {
 	/* empty */
 }
-#else
+#else /* CONFIG_FB_MSM_MDP303 */

 void mdp4_dsi_cmd_del_timer(void);
 #endif
-#else
-static inline void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
+#else  /* CONFIG_FB_MSM_MIPI_DSI */
+
+static inline int mdp4_dsi_cmd_on(struct platform_device *pdev)
 {
-	/* empty */
+	return 0;
 }
-static inline void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd)
+static inline int mdp4_dsi_cmd_off(struct platform_device *pdev)
 {
-	/* empty */
+	return 0;
 }
+static inline int mdp4_dsi_video_on(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline int mdp4_dsi_video_off(struct platform_device *pdev)
+{
+	return 0;
+}
+static inline void mdp4_primary_vsync_dsi_video(void)
+{
+}
+static inline void mdp4_dsi_cmd_base_swap(int cndx,
+	struct mdp4_overlay_pipe *pipe)
+{
+}
+static inline void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime)
+{
+}
+static inline void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime)
+{
+}
+static inline void mdp4_dsi_cmd_pipe_queue(int cndx,
+			struct mdp4_overlay_pipe *pipe)
+{
+}
+static inline void mdp4_dsi_video_pipe_queue(int cndx,
+			struct mdp4_overlay_pipe *pipe)
+{
+}
+static inline void mdp4_dsi_cmd_vsync_ctrl(int cndx, int enable)
+{
+}
+static inline void mdp4_dsi_video_vsync_ctrl(int cndx, int enable)
+{
+}
+
 static inline void mdp4_overlay_dsi_video_start(void)
 {
 	/* empty */
 }
-static inline void mdp4_overlay_dsi_video_vsync_push(
-	struct msm_fb_data_type *mfd, struct mdp4_overlay_pipe *pipe)
-{
-	/* empty */
-}
-static inline void mdp4_dsi_cmd_overlay_restore(void)
-{
-	/* empty */
-}
 #ifdef CONFIG_FB_MSM_MDP40
 static inline void mdp_dsi_cmd_overlay_suspend(struct msm_fb_data_type *mfd)
 {
 	/* empty */
 }
 #endif
-#endif /* MIPI_DSI */
+#endif /* CONFIG_FB_MSM_MIPI_DSI */
 
 void mdp4_dsi_cmd_kickoff_ui(struct msm_fb_data_type *mfd,
 				struct mdp4_overlay_pipe *pipe);
@@ -764,8 +848,6 @@
 				struct mdp4_overlay_pipe *pipe);
 void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
 				struct mdp4_overlay_pipe *pipe);
-void mdp4_dsi_cmd_base_swap(struct mdp4_overlay_pipe *pipe);
-
 void mdp4_overlay_panel_3d(int mixer_num, uint32 panel_3d);
 int mdp4_overlay_3d_sbys(struct fb_info *info, struct msmfb_overlay_3d *req);
 void mdp4_dsi_cmd_3d_sbys(struct msm_fb_data_type *mfd,
@@ -773,6 +855,9 @@
 void mdp4_dsi_video_3d_sbys(struct msm_fb_data_type *mfd,
 			 struct msmfb_overlay_3d *r3d);
 
+void mdp4_backlight_init(int cndx);
+void mdp4_backlight_put_level(int cndx, int level);
+
 int mdp4_mixer_info(int mixer_num, struct mdp_mixer_info *info);
 
 void mdp_dmap_vsync_set(int enable);
@@ -782,11 +867,18 @@
 int mdp4_mddi_overlay_cursor(struct fb_info *info, struct fb_cursor *cursor);
 int mdp_ppp_blit(struct fb_info *info, struct mdp_blit_req *req);
 void mdp4_overlay_resource_release(void);
-void mdp4_overlay_dsi_video_wait4vsync(struct msm_fb_data_type *mfd);
-void mdp4_primary_vsync_dsi_video(void);
 uint32_t mdp4_ss_table_value(int8_t param, int8_t index);
 void mdp4_overlay_borderfill_stage_down(struct mdp4_overlay_pipe *pipe);
 
+#ifdef CONFIG_FB_MSM_MDP303
+static inline int mdp4_overlay_borderfill_supported(void)
+{
+	return 0;
+}
+#else
+int mdp4_overlay_borderfill_supported(void);
+#endif
+
 int mdp4_overlay_writeback_on(struct platform_device *pdev);
 int mdp4_overlay_writeback_off(struct platform_device *pdev);
 void mdp4_writeback_overlay(struct msm_fb_data_type *mfd);
@@ -808,7 +900,6 @@
 uint32_t mdp_block2base(uint32_t block);
 int mdp_hist_lut_config(struct mdp_hist_lut_data *data);
 
-void mdp4_hsic_set(struct mdp4_overlay_pipe *pipe, struct dpp_ctrl *ctrl);
 void mdp4_hsic_update(struct mdp4_overlay_pipe *pipe);
 int mdp4_csc_config(struct mdp_csc_cfg_data *config);
 void mdp4_csc_write(struct mdp_csc_cfg *data, uint32_t base);
@@ -816,11 +907,16 @@
 int mdp4_pcc_cfg(struct mdp_pcc_cfg_data *cfg_ptr);
 int mdp4_argc_cfg(struct mdp_pgc_lut_data *pgc_ptr);
 int mdp4_qseed_cfg(struct mdp_qseed_cfg_data *cfg);
+int mdp4_qseed_access_cfg(struct mdp_qseed_cfg *cfg, uint32_t base);
 u32  mdp4_allocate_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num);
 void mdp4_init_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num);
 void mdp4_free_writeback_buf(struct msm_fb_data_type *mfd, u32 mix_num);
 
 int mdp4_igc_lut_config(struct mdp_igc_lut_data *cfg);
+void mdp4_overlay_iommu_pipe_free(int ndx, int all);
+void mdp4_overlay_iommu_free_list(int mixer, struct ion_handle *ihdl);
+void mdp4_overlay_iommu_unmap_freelist(int mixer);
+void mdp4_overlay_iommu_vsync_cnt(void);
 void mdp4_iommu_unmap(struct mdp4_overlay_pipe *pipe);
 void mdp4_iommu_attach(void);
 int mdp4_v4l2_overlay_set(struct fb_info *info, struct mdp_overlay *req,
diff --git a/drivers/video/msm/mdp4_hsic.c b/drivers/video/msm/mdp4_hsic.c
deleted file mode 100644
index 5735f45..0000000
--- a/drivers/video/msm/mdp4_hsic.c
+++ /dev/null
@@ -1,534 +0,0 @@
-/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/msm_mdp.h>
-#include "mdp.h"
-#include "mdp4.h"
-
-/* Definitions */
-#define MDP4_CSC_MV_OFF		0x4400
-#define MDP4_CSC_PRE_BV_OFF	0x4500
-#define MDP4_CSC_POST_BV_OFF	0x4580
-#define MDP4_CSC_PRE_LV_OFF	0x4600
-#define MDP4_CSC_POST_LV_OFF	0x4680
-#define MDP_VG1_BASE	(MDP_BASE + MDP4_VIDEO_BASE)
-
-#define MDP_VG1_CSC_MVn(n)	(MDP_VG1_BASE + MDP4_CSC_MV_OFF + 4 * (n))
-#define MDP_VG1_CSC_PRE_LVn(n)	(MDP_VG1_BASE + MDP4_CSC_PRE_LV_OFF + 4 * (n))
-#define MDP_VG1_CSC_POST_LVn(n)	(MDP_VG1_BASE + MDP4_CSC_POST_LV_OFF + 4 * (n))
-#define MDP_VG1_CSC_PRE_BVn(n)	(MDP_VG1_BASE + MDP4_CSC_PRE_BV_OFF + 4 * (n))
-#define MDP_VG1_CSC_POST_BVn(n)	(MDP_VG1_BASE + MDP4_CSC_POST_BV_OFF + 4 * (n))
-
-#define Q16	(16)
-#define Q16_ONE	(1 << Q16)
-
-#define Q16_VALUE(x)	((int32_t)((uint32_t)x << Q16))
-#define Q16_PERCENT_VALUE(x, n)	((int32_t)( \
-				div_s64(((int64_t)x * (int64_t)Q16_ONE), n)))
-
-#define Q16_WHOLE(x)	((int32_t)(x >> 16))
-#define Q16_FRAC(x)	((int32_t)(x & 0xFFFF))
-#define Q16_S1Q16_MUL(x, y)	(((x >> 1) * (y >> 1)) >> 14)
-
-#define Q16_MUL(x, y)	((int32_t)((((int64_t)x) * ((int64_t)y)) >> Q16))
-#define Q16_NEGATE(x)	(0 - (x))
-
-/*
- * HSIC Control min/max values
- *    These settings are based on the maximum/minimum allowed modifications to
- *    HSIC controls for layer and display color.  Allowing too much variation in
- *    the CSC block will result in color clipping resulting in unwanted color
- *    shifts.
- */
-#define TRIG_MAX	Q16_VALUE(128)
-#define CON_SAT_MAX	Q16_VALUE(128)
-#define INTENSITY_MAX	(Q16_VALUE(2047) >> 12)
-
-#define HUE_MAX	Q16_VALUE(100)
-#define HUE_MIN	Q16_VALUE(-100)
-#define HUE_DEF	Q16_VALUE(0)
-
-#define SAT_MAX	Q16_VALUE(100)
-#define SAT_MIN	Q16_VALUE(-100)
-#define SAT_DEF	CON_SAT_MAX
-
-#define CON_MAX	Q16_VALUE(100)
-#define CON_MIN	Q16_VALUE(-100)
-#define CON_DEF	CON_SAT_MAX
-
-#define INTEN_MAX	Q16_VALUE(100)
-#define INTEN_MIN	Q16_VALUE(-100)
-#define INTEN_DEF	Q16_VALUE(0)
-
-enum {
-	DIRTY,
-	GENERATED,
-	CLEAN
-};
-
-/* local vars*/
-static int32_t csc_matrix_tab[3][3] = {
-	{0x00012a00, 0x00000000, 0x00019880},
-	{0x00012a00, 0xffff9b80, 0xffff3000},
-	{0x00012a00, 0x00020480, 0x00000000}
-};
-
-static int32_t csc_yuv2rgb_conv_tab[3][3] = {
-	{0x00010000, 0x00000000, 0x000123cb},
-	{0x00010000, 0xffff9af9, 0xffff6b5e},
-	{0x00010000, 0x00020838, 0x00000000}
-};
-
-static int32_t csc_rgb2yuv_conv_tab[3][3] = {
-	{0x00004c8b, 0x00009645, 0x00001d2f},
-	{0xffffda56, 0xffffb60e, 0x00006f9d},
-	{0x00009d70, 0xffff7c2a, 0xffffe666}
-};
-
-static uint32_t csc_pre_bv_tab[3]  = {0xfffff800, 0xffffc000, 0xffffc000};
-static uint32_t csc_post_bv_tab[3] = {0x00000000, 0x00000000, 0x00000000};
-
-static uint32_t csc_pre_lv_tab[6] =  {0x00000000, 0x00007f80, 0x00000000,
-					0x00007f80, 0x00000000, 0x00007f80};
-static uint32_t csc_post_lv_tab[6] = {0x00000000, 0x00007f80, 0x00000000,
-					0x00007f80, 0x00000000, 0x00007f80};
-
-/* Lookup table for Sin/Cos lookup - Q16*/
-static const int32_t  trig_lut[65] = {
-	0x00000000, /* sin((2*M_PI/256) * 0x00);*/
-	0x00000648, /* sin((2*M_PI/256) * 0x01);*/
-	0x00000C90, /* sin((2*M_PI/256) * 0x02);*/
-	0x000012D5,
-	0x00001918,
-	0x00001F56,
-	0x00002590,
-	0x00002BC4,
-	0x000031F1,
-	0x00003817,
-	0x00003E34,
-	0x00004447,
-	0x00004A50,
-	0x0000504D,
-	0x0000563E,
-	0x00005C22,
-	0x000061F8,
-	0x000067BE,
-	0x00006D74,
-	0x0000731A,
-	0x000078AD,
-	0x00007E2F,
-	0x0000839C,
-	0x000088F6,
-	0x00008E3A,
-	0x00009368,
-	0x00009880,
-	0x00009D80,
-	0x0000A268,
-	0x0000A736,
-	0x0000ABEB,
-	0x0000B086,
-	0x0000B505,
-	0x0000B968,
-	0x0000BDAF,
-	0x0000C1D8,
-	0x0000C5E4,
-	0x0000C9D1,
-	0x0000CD9F,
-	0x0000D14D,
-	0x0000D4DB,
-	0x0000D848,
-	0x0000DB94,
-	0x0000DEBE,
-	0x0000E1C6,
-	0x0000E4AA,
-	0x0000E768,
-	0x0000EA0A,
-	0x0000EC83,
-	0x0000EED9,
-	0x0000F109,
-	0x0000F314,
-	0x0000F4FA,
-	0x0000F6BA,
-	0x0000F854,
-	0x0000F9C8,
-	0x0000FB15,
-	0x0000FC3B,
-	0x0000FD3B,
-	0x0000FE13,
-	0x0000FEC4,
-	0x0000FF4E,
-	0x0000FFB1,
-	0x0000FFEC,
-	0x00010000, /* sin((2*M_PI/256) * 0x40);*/
-};
-
-void trig_values_q16(int32_t deg, int32_t *cos, int32_t *sin)
-{
-	int32_t   angle;
-	int32_t   quad, anglei, anglef;
-	int32_t   v0 = 0, v1 = 0;
-	int32_t   t1, t2;
-
-	/*
-	 * Scale the angle so that 256 is one complete revolution and mask it
-	 * to this domain
-	 * NOTE: 0xB60B == 256/360
-	 */
-	angle = Q16_MUL(deg, 0xB60B) & 0x00FFFFFF;
-
-	/* Obtain a quadrant number, integer, and fractional part */
-	quad   =  angle >> 22;
-	anglei = (angle >> 16) & 0x3F;
-	anglef =  angle & 0xFFFF;
-
-	/*
-	 * Using the integer part, obtain the lookup table entry and its
-	 * complement. Using the quadrant, swap and negate these as
-	 * necessary.
-	 * (The values and all derivatives of sine and cosine functions
-	 * can be derived from these values)
-	 */
-	switch (quad) {
-	case 0x0:
-		v0 += trig_lut[anglei];
-		v1 += trig_lut[0x40-anglei];
-		break;
-
-	case 0x1:
-		v0 += trig_lut[0x40-anglei];
-		v1 -= trig_lut[anglei];
-		break;
-
-	case 0x2:
-		v0 -= trig_lut[anglei];
-		v1 -= trig_lut[0x40-anglei];
-		break;
-
-	case 0x3:
-		v0 -= trig_lut[0x40-anglei];
-		v1 += trig_lut[anglei];
-		break;
-	}
-
-	/*
-	 * Multiply the fractional part by 2*PI/256 to move it from lookup
-	 *  table units to radians, giving us the coefficient for first
-	 *  derivatives.
-	 */
-	t1 = Q16_S1Q16_MUL(anglef, 0x0648);
-
-	/*
-	 * Square this and divide by 2 to get the coefficient for second
-	 *   derivatives
-	 */
-	t2 = Q16_S1Q16_MUL(t1, t1) >> 1;
-
-	*sin = v0 + Q16_S1Q16_MUL(v1, t1) - Q16_S1Q16_MUL(v0, t2);
-
-	*cos = v1 - Q16_S1Q16_MUL(v0, t1) - Q16_S1Q16_MUL(v1, t2);
-}
-
-/* Convert input Q16 value to s4.9 */
-int16_t convert_q16_s49(int32_t q16Value)
-{	/* Top half is the whole number, Bottom half is fractional portion*/
-	int16_t whole = Q16_WHOLE(q16Value);
-	int32_t fraction  = Q16_FRAC(q16Value);
-
-	/* Clamp whole to 3 bits */
-	if (whole > 7)
-		whole = 7;
-	else if (whole < -7)
-		whole = -7;
-
-	/* Reduce fraction to 9 bits. */
-	fraction = (fraction<<9)>>Q16;
-
-	return (int16_t) ((int16_t)whole<<9) | ((int16_t)fraction);
-}
-
-/* Convert input Q16 value to uint16 */
-int16_t convert_q16_int16(int32_t val)
-{
-	int32_t rounded;
-
-	if (val >= 0) {
-		/* Add 0.5 */
-		rounded = val + (Q16_ONE>>1);
-	} else {
-		/* Subtract 0.5 */
-		rounded = val - (Q16_ONE>>1);
-	}
-
-	/* Truncate rounded value */
-	return (int16_t)(rounded>>Q16);
-}
-
-/*
- * norm_q16
- *              Return a Q16 value represeting a normalized value
- *
- * value       -100%                 0%               +100%
- *                 |-----------------|----------------|
- *                 ^                 ^                ^
- *             q16MinValue     q16DefaultValue       q16MaxValue
- *
- */
-int32_t norm_q16(int32_t value, int32_t min, int32_t default_val, int32_t max,
-								int32_t range)
-{
-	int32_t diff, perc, mul, result;
-
-	if (0 == value) {
-		result = default_val;
-	} else if (value > 0) {
-		/* value is between 0% and +100% represent 1.0 -> QRange Max */
-		diff = range;
-		perc = Q16_PERCENT_VALUE(value, max);
-		mul = Q16_MUL(perc, diff);
-		result = default_val + mul;
-	} else {
-		/* if (value <= 0) */
-		diff = -range;
-		perc = Q16_PERCENT_VALUE(-value, -min);
-		mul = Q16_MUL(perc, diff);
-		result = default_val + mul;
-	}
-	return result;
-}
-
-void matrix_mul_3x3(int32_t dest[][3], int32_t a[][3], int32_t b[][3])
-{
-	int32_t i, j, k;
-	int32_t tmp[3][3];
-
-	for (i = 0; i < 3; i++) {
-		for (j = 0; j < 3; j++) {
-			tmp[i][j] = 0;
-			for (k = 0; k < 3; k++)
-				tmp[i][j] += Q16_MUL(a[i][k], b[k][j]);
-		}
-	}
-
-	/* in case dest = a or b*/
-	for (i = 0; i < 3; i++) {
-		for (j = 0; j < 3; j++)
-			dest[i][j] = tmp[i][j];
-	}
-}
-
-#define CONVERT(x)	(x)/*convert_q16_s49((x))*/
-void pr_params(struct mdp4_hsic_regs *regs)
-{
-	int i;
-	if (regs) {
-		for (i = 0; i < NUM_HSIC_PARAM; i++) {
-			pr_info("\t: hsic->params[%d] =	0x%08x [raw = 0x%08x]\n",
-			i, CONVERT(regs->params[i]), regs->params[i]);
-		}
-	}
-}
-
-void pr_3x3_matrix(int32_t in[][3])
-{
-	pr_info("\t[0x%08x\t0x%08x\t0x%08x]\n", CONVERT(in[0][0]),
-	CONVERT(in[0][1]), CONVERT(in[0][2]));
-	pr_info("\t[0x%08x\t0x%08x\t0x%08x]\n", CONVERT(in[1][0]),
-	CONVERT(in[1][1]), CONVERT(in[1][2]));
-	pr_info("\t[0x%08x\t0x%08x\t0x%08x]\n", CONVERT(in[2][0]),
-	CONVERT(in[2][1]), CONVERT(in[2][2]));
-}
-
-void _hsic_get(struct mdp4_hsic_regs *regs, int32_t type, int8_t *val)
-{
-	if (type < 0 || type >= NUM_HSIC_PARAM)
-		BUG_ON(-EINVAL);
-	*val = regs->params[type];
-	pr_info("%s: getting params[%d] = %d\n", __func__, type, *val);
-}
-
-void _hsic_set(struct mdp4_hsic_regs *regs, int32_t type, int8_t val)
-{
-	if (type < 0 || type >= NUM_HSIC_PARAM)
-		BUG_ON(-EINVAL);
-
-	if (regs->params[type] != Q16_VALUE(val)) {
-		regs->params[type] = Q16_VALUE(val);
-		regs->dirty = DIRTY;
-	}
-}
-
-void _hsic_generate_csc_matrix(struct mdp4_overlay_pipe *pipe)
-{
-	int i, j;
-	int32_t sin, cos;
-
-	int32_t hue_matrix[3][3];
-	int32_t con_sat_matrix[3][3];
-	struct mdp4_hsic_regs *regs = &(pipe->hsic_regs);
-
-	memset(con_sat_matrix, 0x0, sizeof(con_sat_matrix));
-	memset(hue_matrix, 0x0, sizeof(hue_matrix));
-
-	/*
-	 * HSIC control require matrix multiplication of these two tables
-	 *  [T 0 0][1 0  0]   T = Contrast       C=Cos(Hue)
-	 *  [0 S 0][0 C -N]   S = Saturation     N=Sin(Hue)
-	 *  [0 0 S][0 N  C]
-	 */
-
-	con_sat_matrix[0][0] = norm_q16(regs->params[HSIC_CON], CON_MIN,
-						CON_DEF, CON_MAX, CON_SAT_MAX);
-	con_sat_matrix[1][1] = norm_q16(regs->params[HSIC_SAT], SAT_MIN,
-						SAT_DEF, SAT_MAX, CON_SAT_MAX);
-	con_sat_matrix[2][2] = con_sat_matrix[1][1];
-
-	hue_matrix[0][0] = TRIG_MAX;
-
-	trig_values_q16(norm_q16(regs->params[HSIC_HUE], HUE_MIN, HUE_DEF,
-					 HUE_MAX, TRIG_MAX), &cos, &sin);
-
-	cos = Q16_MUL(cos, TRIG_MAX);
-	sin = Q16_MUL(sin, TRIG_MAX);
-
-	hue_matrix[1][1] = cos;
-	hue_matrix[2][2] = cos;
-	hue_matrix[2][1] = sin;
-	hue_matrix[1][2] = Q16_NEGATE(sin);
-
-	/* Generate YUV CSC matrix */
-	matrix_mul_3x3(regs->conv_matrix, con_sat_matrix, hue_matrix);
-
-	if (!(pipe->op_mode & MDP4_OP_SRC_DATA_YCBCR)) {
-		/* Convert input RGB to YUV then apply CSC matrix */
-		pr_info("Pipe %d, has RGB input\n", pipe->pipe_num);
-		matrix_mul_3x3(regs->conv_matrix, regs->conv_matrix,
-							csc_rgb2yuv_conv_tab);
-	}
-
-	/* Normalize the matrix */
-	for (i = 0; i < 3; i++) {
-		for (j = 0; j < 3; j++)
-			regs->conv_matrix[i][j] = (regs->conv_matrix[i][j]>>14);
-	}
-
-	/* Multiply above result by current csc table */
-	matrix_mul_3x3(regs->conv_matrix, regs->conv_matrix, csc_matrix_tab);
-
-	if (!(pipe->op_mode & MDP4_OP_SRC_DATA_YCBCR)) {
-		/*HACK:only "works"for src side*/
-		/* Convert back to RGB */
-		pr_info("Pipe %d, has RGB output\n", pipe->pipe_num);
-		matrix_mul_3x3(regs->conv_matrix, csc_yuv2rgb_conv_tab,
-							regs->conv_matrix);
-	}
-
-	/* Update clamps pre and post. */
-	/* TODO: different tables for different color formats? */
-	for (i = 0; i < 6; i++) {
-		regs->pre_limit[i] = csc_pre_lv_tab[i];
-		regs->post_limit[i] = csc_post_lv_tab[i];
-	}
-
-	/* update bias values, pre and post */
-	for (i = 0; i < 3; i++) {
-		regs->pre_bias[i] = csc_pre_bv_tab[i];
-		regs->post_bias[i] = csc_post_bv_tab[i] +
-				norm_q16(regs->params[HSIC_INT],
-				INTEN_MIN, INTEN_DEF, INTEN_MAX, INTENSITY_MAX);
-	}
-
-	regs->dirty = GENERATED;
-}
-
-void _hsic_update_mdp(struct mdp4_overlay_pipe *pipe)
-{
-	struct mdp4_hsic_regs *regs = &(pipe->hsic_regs);
-	int i, j, k;
-
-	uint32_t *csc_mv;
-	uint32_t *pre_lv;
-	uint32_t *post_lv;
-	uint32_t *pre_bv;
-	uint32_t *post_bv;
-
-	switch (pipe->pipe_num) {
-	case OVERLAY_PIPE_VG2:
-		csc_mv = (uint32_t *) (MDP_VG1_CSC_MVn(0) +
-					MDP4_VIDEO_OFF);
-		pre_lv = (uint32_t *) (MDP_VG1_CSC_PRE_LVn(0) +
-					MDP4_VIDEO_OFF);
-		post_lv = (uint32_t *) (MDP_VG1_CSC_POST_LVn(0) +
-					MDP4_VIDEO_OFF);
-		pre_bv = (uint32_t *) (MDP_VG1_CSC_PRE_BVn(0) +
-					MDP4_VIDEO_OFF);
-		post_bv = (uint32_t *) (MDP_VG1_CSC_POST_BVn(0) +
-					MDP4_VIDEO_OFF);
-		break;
-	case OVERLAY_PIPE_VG1:
-	default:
-			csc_mv = (uint32_t *) MDP_VG1_CSC_MVn(0);
-			pre_lv = (uint32_t *) MDP_VG1_CSC_PRE_LVn(0);
-			post_lv = (uint32_t *) MDP_VG1_CSC_POST_LVn(0);
-			pre_bv = (uint32_t *) MDP_VG1_CSC_PRE_BVn(0);
-			post_bv = (uint32_t *) MDP_VG1_CSC_POST_BVn(0);
-		break;
-	}
-
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
-	for (i = 0; i < 3; i++) {
-		for (j = 0; j < 3; j++) {
-			k = (3*i) + j;
-			MDP_OUTP(csc_mv + k, convert_q16_s49(
-						regs->conv_matrix[i][j]));
-		}
-	}
-
-	for (i = 0; i < 6; i++) {
-		MDP_OUTP(pre_lv + i, convert_q16_s49(regs->pre_limit[i]));
-		MDP_OUTP(post_lv + i, convert_q16_s49(regs->post_limit[i]));
-	}
-
-	for (i = 0; i < 3; i++) {
-		MDP_OUTP(pre_bv + i, convert_q16_s49(regs->pre_bias[i]));
-		MDP_OUTP(post_bv + i, convert_q16_s49(regs->post_bias[i]));
-	}
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-
-	regs->dirty = CLEAN;
-}
-
-void mdp4_hsic_get(struct mdp4_overlay_pipe *pipe, struct dpp_ctrl *ctrl)
-{
-	int i;
-	for (i = 0; i < NUM_HSIC_PARAM; i++)
-		_hsic_get(&(pipe->hsic_regs), i, &(ctrl->hsic_params[i]));
-}
-
-void mdp4_hsic_set(struct mdp4_overlay_pipe *pipe, struct dpp_ctrl *ctrl)
-{
-	int i;
-	for (i = 0; i < NUM_HSIC_PARAM; i++)
-		_hsic_set(&(pipe->hsic_regs), i, ctrl->hsic_params[i]);
-
-	if (pipe->hsic_regs.dirty == DIRTY)
-		_hsic_generate_csc_matrix(pipe);
-}
-
-void mdp4_hsic_update(struct mdp4_overlay_pipe *pipe)
-{
-	if (pipe->hsic_regs.dirty == GENERATED)
-		_hsic_update_mdp(pipe);
-}
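
Note on the deleted file: mdp4_hsic.c implemented its HSIC-to-CSC math in Q16 fixed point (Q16_VALUE, Q16_MUL, convert_q16_s49). For readers following the removal, a tiny self-contained illustration of that number format, independent of the kernel code:

	#include <stdint.h>
	#include <stdio.h>

	#define Q16		16
	#define Q16_VALUE(x)	((int32_t)((uint32_t)(x) << Q16))	/* integer -> Q16 */
	#define Q16_MUL(x, y)	((int32_t)((((int64_t)(x)) * ((int64_t)(y))) >> Q16))

	int main(void)
	{
		int32_t three = Q16_VALUE(3);	/* 3.0 == 0x00030000 */
		int32_t half  = 0x00008000;	/* 0.5 in Q16 */
		int32_t prod  = Q16_MUL(three, half);

		/* prints 0x00018000 (1.500000): 16 fractional bits, 65536 == 1.0 */
		printf("prod = 0x%08x (%f)\n", prod, prod / 65536.0);
		return 0;
	}

convert_q16_s49() in the removed file then clamped the whole part to 3 bits plus sign and kept 9 fractional bits, i.e. the s4.9 format written into the CSC matrix registers.
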
diff --git a/drivers/video/msm/mdp4_overlay.c b/drivers/video/msm/mdp4_overlay.c
index 703d65d..7da011f 100644
--- a/drivers/video/msm/mdp4_overlay.c
+++ b/drivers/video/msm/mdp4_overlay.c
@@ -50,6 +50,7 @@
 	struct blend_cfg blend[MDP4_MIXER_MAX][MDP4_MIXER_STAGE_MAX];
 	uint32 mixer_cfg[MDP4_MIXER_MAX];
 	uint32 flush[MDP4_MIXER_MAX];
+	struct iommu_free_list iommu_free[MDP4_MIXER_MAX];
 	uint32 cs_controller;
 	uint32 hw_version;
 	uint32 panel_3d;
@@ -101,17 +102,104 @@
 	},
 };
 
+static DEFINE_MUTEX(iommu_mutex);
 static struct mdp4_overlay_ctrl *ctrl = &mdp4_overlay_db;
 static int new_perf_level;
 static struct ion_client *display_iclient;
-static struct mdp4_iommu_pipe_info mdp_iommu[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
+
+
+/*
+ * mdp4_overlay_iommu_unmap_freelist()
+ * mdp4_overlay_iommu_2freelist()
+ * mdp4_overlay_iommu_pipe_free()
+ * The above three functions need to be called from the same thread and
+ * in order so that no mutex is needed.
+ */
+void mdp4_overlay_iommu_unmap_freelist(int mixer)
+{
+	int i;
+	struct ion_handle *ihdl;
+	struct iommu_free_list *flist;
+
+	mutex_lock(&iommu_mutex);
+	flist = &ctrl->iommu_free[mixer];
+	if (flist->total == 0) {
+		mutex_unlock(&iommu_mutex);
+		return;
+	}
+	for (i = 0; i < IOMMU_FREE_LIST_MAX; i++) {
+		ihdl = flist->ihdl[i];
+		if (ihdl == NULL)
+			continue;
+		pr_debug("%s: mixer=%d i=%d ihdl=0x%p\n", __func__,
+					mixer, i, ihdl);
+		ion_unmap_iommu(display_iclient, ihdl, DISPLAY_READ_DOMAIN,
+							GEN_POOL);
+		mdp4_stat.iommu_unmap++;
+		ion_free(display_iclient, ihdl);
+		flist->ihdl[i] = NULL;
+	}
+
+	flist->fndx = 0;
+	flist->total = 0;
+	mutex_unlock(&iommu_mutex);
+}
+
+void mdp4_overlay_iommu_2freelist(int mixer, struct ion_handle *ihdl)
+{
+	struct iommu_free_list *flist;
+
+	flist = &ctrl->iommu_free[mixer];
+	if (flist->fndx >= IOMMU_FREE_LIST_MAX) {
+		pr_err("%s: Error, mixer=%d iommu fndx=%d\n",
+				__func__, mixer, flist->fndx);
+		mdp4_stat.iommu_drop++;
+		mutex_unlock(&iommu_mutex);
+		return;
+	}
+
+	pr_debug("%s: add mixer=%d fndx=%d ihdl=0x%p\n", __func__,
+				mixer, flist->fndx, ihdl);
+
+	flist->total++;
+	flist->ihdl[flist->fndx++] = ihdl;
+}
+
+void mdp4_overlay_iommu_pipe_free(int ndx, int all)
+{
+	struct mdp4_overlay_pipe *pipe;
+	struct mdp4_iommu_pipe_info *iom;
+	int plane, mixer;
+
+	pipe = mdp4_overlay_ndx2pipe(ndx);
+	if (pipe == NULL)
+		return;
+
+	mutex_lock(&iommu_mutex);
+	mixer = pipe->mixer_num;
+	iom = &pipe->iommu;
+	pr_debug("%s: mixer=%d ndx=%d all=%d\n", __func__,
+				mixer, pipe->pipe_ndx, all);
+	for (plane = 0; plane < MDP4_MAX_PLANE; plane++) {
+		if (iom->prev_ihdl[plane]) {
+			mdp4_overlay_iommu_2freelist(mixer,
+					iom->prev_ihdl[plane]);
+			iom->prev_ihdl[plane] = NULL;
+		}
+		if (all && iom->ihdl[plane]) {
+			mdp4_overlay_iommu_2freelist(mixer, iom->ihdl[plane]);
+			iom->ihdl[plane] = NULL;
+		}
+	}
+	mutex_unlock(&iommu_mutex);
+}
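
The free-list code above never releases an ion handle inline: mdp4_overlay_iommu_pipe_free() only parks handles on the per-mixer list, and mdp4_overlay_iommu_unmap_freelist() later unmaps and frees them from a single thread, the intent being that a buffer the MDP may still be fetching is not torn down mid-frame. A simplified, generic sketch of that deferred-free pattern; the types and names below are placeholders, not kernel API:

	#define FREE_LIST_MAX	32

	struct free_list {
		int fndx;			/* next free slot */
		void *handle[FREE_LIST_MAX];	/* stands in for struct ion_handle * */
	};

	/* producer side: park a buffer that the hardware may still be reading */
	static int free_list_add(struct free_list *fl, void *handle)
	{
		if (fl->fndx >= FREE_LIST_MAX)
			return -1;		/* list full: caller counts a drop */
		fl->handle[fl->fndx++] = handle;
		return 0;
	}

	/* consumer side: run later, from one thread, once the frame has switched */
	static void free_list_drain(struct free_list *fl, void (*release)(void *))
	{
		int i;

		for (i = 0; i < fl->fndx; i++) {
			release(fl->handle[i]);
			fl->handle[i] = NULL;
		}
		fl->fndx = 0;
	}

In the patch the drain step is mdp4_overlay_iommu_unmap_freelist() above.
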
 
 int mdp4_overlay_iommu_map_buf(int mem_id,
 	struct mdp4_overlay_pipe *pipe, unsigned int plane,
 	unsigned long *start, unsigned long *len,
 	struct ion_handle **srcp_ihdl)
 {
-	struct mdp4_iommu_pipe_info *iom_pipe_info;
+	struct mdp4_iommu_pipe_info *iom;
 
 	if (!display_iclient)
 		return -EINVAL;
@@ -133,30 +221,21 @@
 		return -EINVAL;
 	}
 
-	iom_pipe_info = &mdp_iommu[pipe->mixer_num][pipe->pipe_ndx - 1];
-	if (!iom_pipe_info->ihdl[plane]) {
-		iom_pipe_info->ihdl[plane] = *srcp_ihdl;
-	} else {
-		if (iom_pipe_info->prev_ihdl[plane]) {
-			ion_unmap_iommu(display_iclient,
-				iom_pipe_info->prev_ihdl[plane],
-				DISPLAY_READ_DOMAIN, GEN_POOL);
-			ion_free(display_iclient,
-				iom_pipe_info->prev_ihdl[plane]);
-			pr_debug("Previous: mixer %u, pipe %u, plane %u, "
-				"prev_ihdl %p\n", pipe->mixer_num,
-				pipe->pipe_ndx, plane,
-				iom_pipe_info->prev_ihdl[plane]);
-		}
+	mutex_lock(&iommu_mutex);
+	mdp4_stat.iommu_map++;
+	iom = &pipe->iommu;
+	iom->prev_ihdl[plane] = iom->ihdl[plane];
+	iom->ihdl[plane] = *srcp_ihdl;
 
-		iom_pipe_info->prev_ihdl[plane] = iom_pipe_info->ihdl[plane];
-		iom_pipe_info->ihdl[plane] = *srcp_ihdl;
-	}
-	pr_debug("mem_id %d, start 0x%lx, len 0x%lx\n",
-		mem_id, *start, *len);
+	pr_debug("%s: ndx=%d plane=%d prev=0x%p cur=0x%p start=0x%lx len=%lx\n",
+		 __func__, pipe->pipe_ndx, plane, iom->prev_ihdl[plane],
+			iom->ihdl[plane], *start, *len);
+	mutex_unlock(&iommu_mutex);
 	return 0;
 }
 
+static struct mdp4_iommu_pipe_info mdp_iommu[MDP4_MIXER_MAX][OVERLAY_PIPE_MAX];
+
 void mdp4_iommu_unmap(struct mdp4_overlay_pipe *pipe)
 {
 	struct mdp4_iommu_pipe_info *iom_pipe_info;
@@ -183,9 +262,7 @@
 
 			if (iom_pipe_info->mark_unmap) {
 				if (iom_pipe_info->ihdl[i]) {
-					if (pipe->mixer_num == MDP4_MIXER1)
-						mdp4_overlay_dtv_wait4vsync();
-					pr_debug("%s(): mixer %u, pipe %u, plane %u, "
+					pr_debug("%s(): MARK, mixer %u, pipe %u, plane %u, "
 						"ihdl %p\n", __func__,
 						pipe->mixer_num, j + 1, i,
 						iom_pipe_info->ihdl[i]);
@@ -246,6 +323,11 @@
 	}
 }
 
+int mdp4_overlay_borderfill_supported(void)
+{
+	return (ctrl->hw_version >= 0x0402030b);
+}
+
 void mdp4_overlay_dmae_cfg(struct msm_fb_data_type *mfd, int atv)
 {
 	uint32	dmae_cfg_reg;
@@ -421,7 +503,7 @@
 {
 	uint32 off, bpp;
 
-	if (mdp_is_in_isr == FALSE)
+	if (!in_interrupt())
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 
 	if (pipe->dma_blt_addr) {
@@ -447,7 +529,7 @@
 	/* dma_p dest */
 	MDP_OUTP(MDP_BASE + 0x90010, (pipe->dst_y << 16 | pipe->dst_x));
 
-	if (mdp_is_in_isr == FALSE)
+	if (!in_interrupt())
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 }
 
@@ -684,7 +766,7 @@
 	char *vg_base;
 	uint32 frame_size, src_size, src_xy, dst_size, dst_xy;
 	uint32 format, pattern, luma_offset, chroma_offset;
-	uint32 mask, curr, addr;
+	uint32 mask;
 	int pnum, ptype;
 
 	pnum = pipe->pipe_num - OVERLAY_PIPE_VG1; /* start from 0 */
@@ -701,10 +783,29 @@
 	format = mdp4_overlay_format(pipe);
 	pattern = mdp4_overlay_unpack_pattern(pipe);
 
+	/* CSC Post Processing enabled? */
+	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
+		if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_CSC_CFG) {
+			if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_ENABLE)
+				pipe->op_mode |= MDP4_OP_CSC_EN;
+			if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_YUV_IN)
+				pipe->op_mode |= MDP4_OP_SRC_DATA_YCBCR;
+			if (pipe->pp_cfg.csc_cfg.flags & MDP_CSC_FLAG_YUV_OUT)
+				pipe->op_mode |= MDP4_OP_DST_DATA_YCBCR;
+
+			mdp4_csc_write(&pipe->pp_cfg.csc_cfg,
+				(uint32_t) (vg_base + MDP4_VIDEO_CSC_OFF));
+		}
+		if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_QSEED_CFG) {
+			mdp4_qseed_access_cfg(&pipe->pp_cfg.qseed_cfg[0],
+							(uint32_t) vg_base);
+			mdp4_qseed_access_cfg(&pipe->pp_cfg.qseed_cfg[1],
+							(uint32_t) vg_base);
+		}
+	}
 	/* not RGB use VG pipe, pure VG pipe */
-	pipe->op_mode |= MDP4_OP_CSC_EN;
 	if (ptype != OVERLAY_TYPE_RGB)
-		pipe->op_mode |= MDP4_OP_SRC_DATA_YCBCR;
+		pipe->op_mode |= (MDP4_OP_CSC_EN | MDP4_OP_SRC_DATA_YCBCR);
 
 #ifdef MDP4_IGC_LUT_ENABLE
 	pipe->op_mode |= MDP4_OP_IGC_LUT_EN;
@@ -748,24 +849,6 @@
 			&chroma_offset);
 	}
 
-	/* Ensure proper covert matrix loaded when color space swaps */
-	curr = inpdw(vg_base + 0x0058);
-	mask = 0x600;
-
-	if ((curr & mask) != (pipe->op_mode & mask)) {
-		addr = ((uint32_t)vg_base) + 0x4000;
-		if (ptype != OVERLAY_TYPE_RGB)
-			mdp4_csc_write(&(mdp_csc_convert[1]), addr);
-		else
-			mdp4_csc_write(&(mdp_csc_convert[0]), addr);
-
-		mask = 0xFFFCFFFF;
-	} else {
-		/* Don't touch bits you don't want to configure*/
-		mask = 0xFFFCF1FF;
-	}
-	pipe->op_mode = (pipe->op_mode & mask) | (curr & ~mask);
-
 	/* luma component plane */
 	outpdw(vg_base + 0x0010, pipe->srcp0_addr + luma_offset);
 
@@ -798,15 +881,6 @@
 			pipe->r_bit << 4 | pipe->b_bit << 2 | pipe->g_bit);
 	}
 
-	if (pipe->flags & MDP_SHARPENING) {
-		outpdw(vg_base + 0x8200,
-			mdp4_ss_table_value(pipe->req_data.dpp.sharp_strength,
-									0));
-		outpdw(vg_base + 0x8204,
-			mdp4_ss_table_value(pipe->req_data.dpp.sharp_strength,
-									1));
-	}
-
 	if (mdp_rev > MDP_REV_41) {
 		/* mdp chip select controller */
 		mask = 0;
@@ -1315,7 +1389,7 @@
 	} else
 		overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
 
-	if (mdp_is_in_isr == FALSE)
+	if (!in_interrupt())
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 
 	/*
@@ -1410,7 +1484,7 @@
 	outpdw(overlay_base + 0x0014, curr | 0x4);	/* GC_LUT_EN, 888 */
 #endif
 
-	if (mdp_is_in_isr == FALSE)
+	if (!in_interrupt())
 		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 }
 
@@ -1473,7 +1547,7 @@
 	return cnt;
 }
 
-static void mdp4_mixer_stage_commit(int mixer)
+void mdp4_mixer_stage_commit(int mixer)
 {
 	struct mdp4_overlay_pipe *pipe;
 	int i, num;
@@ -1537,20 +1611,13 @@
 
 	for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
 		pp = ctrl->stage[mixer][i];
-		if (pp == pipe) {
+		if (pp && pp->pipe_ndx == pipe->pipe_ndx) {
 			ctrl->stage[mixer][i] = NULL;
 			break;
 		}
 	}
 
 	ctrl->stage[mixer][pipe->mixer_stage] = pipe;	/* keep it */
-
-	if (!(pipe->flags & MDP_OV_PLAY_NOWAIT)) {
-		pr_debug("%s: mixer=%d ndx=%d stage=%d flags=%x\n",
-			__func__, mixer, pipe->pipe_ndx,
-				pipe->mixer_stage, pipe->flags);
-		mdp4_mixer_stage_commit(mixer);
-	}
 }
 
 void mdp4_mixer_stage_down(struct mdp4_overlay_pipe *pipe)
@@ -1562,16 +1629,11 @@
 
 	for (i = MDP4_MIXER_STAGE_BASE; i < MDP4_MIXER_STAGE_MAX; i++) {
 		pp = ctrl->stage[mixer][i];
-		if (pp == pipe)
+		if (pp && pp->pipe_ndx == pipe->pipe_ndx)
 			ctrl->stage[mixer][i] = NULL;  /* clear it */
 	}
 
-	if (!(pipe->flags & MDP_OV_PLAY_NOWAIT)) {
-		pr_debug("%s: mixer=%d ndx=%d stage=%d flags=%x\n",
-			__func__, pipe->mixer_num, pipe->pipe_ndx,
-				pipe->mixer_stage, pipe->flags);
-		mdp4_mixer_stage_commit(pipe->mixer_num);
-	}
+	mdp4_mixer_stage_commit(mixer);
 }
 /*
  * mixer0: rgb3: border color at register 0x15004, 0x15008
@@ -1616,11 +1678,13 @@
 	bspipe->pipe_used = 0;
 
 	if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
-		mdp4_dsi_video_base_swap(pipe);
+		mdp4_dsi_video_base_swap(0, pipe);
+	else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+		mdp4_dsi_cmd_base_swap(0, pipe);
 	else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
-		mdp4_lcdc_base_swap(pipe);
+		mdp4_lcdc_base_swap(0, pipe);
 	else if (ctrl->panel_mode & MDP4_PANEL_DTV)
-		mdp4_dtv_base_swap(pipe);
+		mdp4_dtv_base_swap(0, pipe);
 
 	mdp4_overlay_reg_flush(bspipe, 1);
 	/* borderfill pipe as base layer */
@@ -1667,7 +1731,14 @@
 	/* free borderfill pipe */
 	pipe->pipe_used = 0;
 
-	mdp4_dsi_video_base_swap(bspipe);
+	if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+		mdp4_dsi_video_base_swap(0, bspipe);
+	else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+		mdp4_dsi_cmd_base_swap(0, bspipe);
+	else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+		mdp4_lcdc_base_swap(0, bspipe);
+	else if (ctrl->panel_mode & MDP4_PANEL_DTV)
+		mdp4_dtv_base_swap(0, bspipe);
 
 	/* free borderfill pipe */
 	mdp4_overlay_reg_flush(pipe, 1);
@@ -1764,7 +1835,7 @@
 	struct mdp4_overlay_pipe *d_pipe;
 	struct mdp4_overlay_pipe *s_pipe;
 	struct blend_cfg *blend;
-	int i, off, ptype;
+	int i, off, ptype, alpha_drop = 0;
 	int d_alpha, s_alpha;
 	unsigned char *overlay_base;
 	uint32 c0, c1, c2;
@@ -1788,13 +1859,21 @@
 			d_alpha = 0;
 			continue;
 		}
+		/* alpha channel is lost on VG pipe when using QSEED or M/N */
+		if (s_pipe->pipe_type == OVERLAY_TYPE_VIDEO &&
+			((s_pipe->op_mode & MDP4_OP_SCALEY_EN) ||
+			(s_pipe->op_mode & MDP4_OP_SCALEX_EN)) &&
+			!(s_pipe->op_mode & MDP4_OP_SCALEY_PIXEL_RPT))
+			alpha_drop = 1;
+
 		d_pipe = mdp4_background_layer(mixer, s_pipe);
 		d_alpha = d_pipe->alpha_enable;
 		s_alpha = s_pipe->alpha_enable;
 		pr_debug("%s: stage=%d: bg: ndx=%d da=%d dalpha=%x "
-			"fg: ndx=%d sa=%d salpha=%x is_fg=%d\n",
+			"fg: ndx=%d sa=%d salpha=%x is_fg=%d alpha_drop=%d\n",
 		 __func__, i-2, d_pipe->pipe_ndx, d_alpha, d_pipe->alpha,
-		s_pipe->pipe_ndx, s_alpha, s_pipe->alpha, s_pipe->is_fg);
+		s_pipe->pipe_ndx, s_alpha, s_pipe->alpha, s_pipe->is_fg,
+		alpha_drop);
 
 		/* base on fg's alpha */
 		blend->bg_alpha = 0x0ff - s_pipe->alpha;
@@ -1807,14 +1886,23 @@
 				blend->solidfill_pipe = d_pipe;
 			}
 		} else if (s_alpha) {
-			blend->op = (MDP4_BLEND_BG_ALPHA_FG_PIXEL |
-				    MDP4_BLEND_BG_INV_ALPHA);
+			if (!alpha_drop) {
+				blend->op = MDP4_BLEND_BG_ALPHA_FG_PIXEL;
+				if (!(s_pipe->flags & MDP_BLEND_FG_PREMULT))
+					blend->op |=
+						MDP4_BLEND_FG_ALPHA_FG_PIXEL;
+			} else
+				blend->op = MDP4_BLEND_BG_ALPHA_FG_CONST;
+
+			blend->op |= MDP4_BLEND_BG_INV_ALPHA;
 		} else if (d_alpha) {
 			ptype = mdp4_overlay_format2type(s_pipe->src_format);
 			if (ptype == OVERLAY_TYPE_VIDEO) {
-				blend->op = (MDP4_BLEND_BG_ALPHA_BG_PIXEL |
-				    MDP4_BLEND_FG_ALPHA_BG_PIXEL |
-				    MDP4_BLEND_FG_INV_ALPHA);
+				blend->op = (MDP4_BLEND_FG_ALPHA_BG_PIXEL |
+					MDP4_BLEND_FG_INV_ALPHA);
+				if (!(s_pipe->flags & MDP_BLEND_FG_PREMULT))
+					blend->op |=
+						MDP4_BLEND_BG_ALPHA_BG_PIXEL;
 				blend->co3_sel = 0; /* use bg alpha */
 			} else {
 				/* s_pipe is rgb without alpha */
@@ -1993,6 +2081,8 @@
 	iom_pipe_info = &mdp_iommu[pipe->mixer_num][pipe->pipe_ndx - 1];
 	iom_pipe_info->mark_unmap = 1;
 
+	mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+
 	memset(pipe, 0, sizeof(*pipe));
 
 	pipe->pipe_type = ptype;
@@ -2105,6 +2195,62 @@
 		return -ERANGE;
 	}
 
+	if (req->src_rect.h > 0xFFF) {
+		pr_err("%s: src_h is out of range: 0X%x!\n",
+		       __func__, req->src_rect.h);
+		mdp4_stat.err_size++;
+		return -EINVAL;
+	}
+
+	if (req->src_rect.w > 0xFFF) {
+		pr_err("%s: src_w is out of range: 0X%x!\n",
+		       __func__, req->src_rect.w);
+		mdp4_stat.err_size++;
+		return -EINVAL;
+	}
+
+	if (req->src_rect.x > 0xFFF) {
+		pr_err("%s: src_x is out of range: 0X%x!\n",
+		       __func__, req->src_rect.x);
+		mdp4_stat.err_size++;
+		return -EINVAL;
+	}
+
+	if (req->src_rect.y > 0xFFF) {
+		pr_err("%s: src_y is out of range: 0X%x!\n",
+		       __func__, req->src_rect.y);
+		mdp4_stat.err_size++;
+		return -EINVAL;
+	}
+
+	if (req->dst_rect.h > 0xFFF) {
+		pr_err("%s: dst_h is out of range: 0X%x!\n",
+		       __func__, req->dst_rect.h);
+		mdp4_stat.err_size++;
+		return -EINVAL;
+	}
+
+	if (req->dst_rect.w > 0xFFF) {
+		pr_err("%s: dst_w is out of range: 0X%x!\n",
+		       __func__, req->dst_rect.w);
+		mdp4_stat.err_size++;
+		return -EINVAL;
+	}
+
+	if (req->dst_rect.x > 0xFFF) {
+		pr_err("%s: dst_x is out of range: 0X%x!\n",
+		       __func__, req->dst_rect.x);
+		mdp4_stat.err_size++;
+		return -EINVAL;
+	}
+
+	if (req->dst_rect.y > 0xFFF) {
+		pr_err("%s: dst_y is out of range: 0X%x!\n",
+		       __func__, req->dst_rect.y);
+		mdp4_stat.err_size++;
+		return -EINVAL;
+	}
+
 	if (req->src_rect.h == 0 || req->src_rect.w == 0) {
 		pr_err("%s: src img of zero size!\n", __func__);
 		mdp4_stat.err_size++;
@@ -2204,7 +2350,6 @@
 	}
 
 	iom_pipe_info = &mdp_iommu[pipe->mixer_num][pipe->pipe_ndx - 1];
-	iom_pipe_info->mark_unmap = 0;
 
 	pipe->src_format = req->src.format;
 	ret = mdp4_overlay_format2pipe(pipe);
@@ -2372,29 +2517,6 @@
 	return 0;
 }
 
-int mdp4_overlay_blt_offset(struct fb_info *info, struct msmfb_overlay_blt *req)
-{
-	int ret = 0;
-
-	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-
-	if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
-		return -EINTR;
-
-	if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
-		ret = mdp4_dsi_overlay_blt_offset(mfd, req);
-	else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
-		ret = mdp4_dsi_video_overlay_blt_offset(mfd, req);
-	else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
-		ret = mdp4_lcdc_overlay_blt_offset(mfd, req);
-	else if (ctrl->panel_mode & MDP4_PANEL_MDDI)
-		mdp4_mddi_overlay_blt_offset(mfd, req);
-
-	mutex_unlock(&mfd->dma->ov_mutex);
-
-	return ret;
-}
-
 int mdp4_overlay_get(struct fb_info *info, struct mdp_overlay *req)
 {
 	struct mdp4_overlay_pipe *pipe;
@@ -2544,7 +2666,6 @@
 	if (mfd->ov1_blt_state == mfd->use_ov1_blt)
 		return;
 	if (mfd->use_ov1_blt) {
-		mdp4_allocate_writeback_buf(mfd, MDP4_MIXER1);
 		mdp4_dtv_overlay_blt_start(mfd);
 		pr_debug("%s overlay1 writeback is enabled\n", __func__);
 	} else {
@@ -2657,23 +2778,17 @@
 		}
 	}
 
-	if (pipe->flags & MDP_SHARPENING) {
-		bool test = ((pipe->req_data.dpp.sharp_strength > 0) &&
-			((req->src_rect.w > req->dst_rect.w) &&
-			 (req->src_rect.h > req->dst_rect.h)));
-		if (test) {
-			pr_debug("%s: No sharpening while downscaling.\n",
-								__func__);
-			pipe->flags &= ~MDP_SHARPENING;
-		}
-	}
-
-	/* precompute HSIC matrices */
-	if (req->flags & MDP_DPP_HSIC)
-		mdp4_hsic_set(pipe, &(req->dpp));
-
 	mdp4_stat.overlay_set[pipe->mixer_num]++;
 
+	if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
+		if (pipe->pipe_num <= OVERLAY_PIPE_VG2)
+			memcpy(&pipe->pp_cfg, &req->overlay_pp_cfg,
+					sizeof(struct mdp_overlay_pp_params));
+		else
+			pr_debug("%s: RGB Pipes don't support CSC/QSEED\n",
+								__func__);
+	}
+
 	if (ctrl->panel_mode & MDP4_PANEL_DTV &&
 	    pipe->mixer_num == MDP4_MIXER1) {
 		u32 use_blt = mdp4_overlay_blt_enable(req, mfd, perf_level);
@@ -2701,8 +2816,6 @@
 				if (old_level > perf_level)
 					mdp4_set_perf_level();
 			} else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
-				mdp4_dsi_cmd_dma_busy_wait(mfd);
-				mdp4_dsi_blt_dmap_busy_wait(mfd);
 				mdp4_set_perf_level();
 			} else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
 				if (old_level > perf_level)
@@ -2713,10 +2826,8 @@
 				mdp4_set_perf_level();
 			}
 		} else {
-			if (ctrl->panel_mode & MDP4_PANEL_DTV) {
-				mdp4_overlay_reg_flush(pipe, 0);
-				mdp4_overlay_dtv_ov_done_push(mfd, pipe);
-			}
+			if (ctrl->panel_mode & MDP4_PANEL_DTV)
+				mdp4_overlay_dtv_set_perf(mfd);
 		}
 	}
 	mutex_unlock(&mfd->dma->ov_mutex);
@@ -2734,6 +2845,7 @@
 		if (pipe == NULL)
 			continue;
 		pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
+		mdp4_overlay_reg_flush(pipe, 1);
 		mdp4_mixer_stage_down(pipe);
 		mdp4_overlay_pipe_free(pipe);
 		cnt++;
@@ -2746,8 +2858,6 @@
 {
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
 	struct mdp4_overlay_pipe *pipe;
-	struct dpp_ctrl dpp;
-	int i;
 
 	if (mfd == NULL)
 		return -ENODEV;
@@ -2775,83 +2885,32 @@
 	else {
 		/* mixer 0 */
 		ctrl->mixer0_played = 0;
-#ifdef CONFIG_FB_MSM_MIPI_DSI
-		if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
-			if (mfd->panel_power_on) {
-				mdp4_dsi_blt_dmap_busy_wait(mfd);
-			}
-		}
-#else
+
 		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
 			if (mfd->panel_power_on)
 				mdp4_mddi_blt_dmap_busy_wait(mfd);
 		}
-#endif
 	}
 
-	{
-		mdp4_overlay_reg_flush(pipe, 1);
+	mdp4_overlay_reg_flush(pipe, 1);
+	mdp4_mixer_stage_down(pipe);
 
-		if (mfd->use_ov0_blt || pipe->mixer_num == MDP4_MIXER1) {
-			/* unstage pipe forcedly */
-			pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
+	if (pipe->mixer_num == MDP4_MIXER0) {
+		if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+			if (mfd->panel_power_on)
+				mdp4_mddi_overlay_restore();
 		}
 
-		mdp4_mixer_stage_down(pipe);
-
-		if (pipe->mixer_num == MDP4_MIXER0) {
-			mfd->use_ov0_blt &= ~(1 << (pipe->pipe_ndx-1));
-			mdp4_overlay_update_blt_mode(mfd);
-#ifdef CONFIG_FB_MSM_MIPI_DSI
-			if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
-				if (mfd->panel_power_on) {
-					mdp4_dsi_cmd_overlay_restore();
-					mdp4_dsi_cmd_dma_busy_wait(mfd);
-					mdp4_dsi_blt_dmap_busy_wait(mfd);
-				}
-			} else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
-				pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
-				if (mfd->panel_power_on)
-					mdp4_overlay_dsi_video_vsync_push(mfd,
-									  pipe);
-			}
-#else
-			if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
-				if (mfd->panel_power_on) {
-					mdp4_mddi_overlay_restore();
-					mdp4_mddi_dma_busy_wait(mfd);
-					mdp4_mddi_blt_dmap_busy_wait(mfd);
-				}
-			}
-#endif
-			else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
-				pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
-				if (mfd->panel_power_on)
-					mdp4_overlay_lcdc_vsync_push(mfd, pipe);
-			}
-			if (!mfd->use_ov0_blt)
-				mdp4_free_writeback_buf(mfd, MDP4_MIXER0);
-		} else {	/* mixer1, DTV, ATV */
-			if (ctrl->panel_mode & MDP4_PANEL_DTV) {
-				mdp4_overlay_dtv_unset(mfd, pipe);
-				mfd->use_ov1_blt &= ~(1 << (pipe->pipe_ndx-1));
-				mdp4_overlay1_update_blt_mode(mfd);
-				if (!mfd->use_ov1_blt)
-					mdp4_free_writeback_buf(mfd,
-								MDP4_MIXER1);
-			}
+		mfd->use_ov0_blt &= ~(1 << (pipe->pipe_ndx-1));
+		mdp4_overlay_update_blt_mode(mfd);
+	} else {	/* mixer1, DTV, ATV */
+		if (ctrl->panel_mode & MDP4_PANEL_DTV) {
+			mdp4_overlay_dtv_unset(mfd, pipe);
+			mfd->use_ov1_blt &= ~(1 << (pipe->pipe_ndx-1));
+			mdp4_overlay1_update_blt_mode(mfd);
 		}
 	}
 
-	/* Reset any HSIC settings to default */
-	if (pipe->flags & MDP_DPP_HSIC) {
-		for (i = 0; i < NUM_HSIC_PARAM; i++)
-			dpp.hsic_params[i] = 0;
-
-		mdp4_hsic_set(pipe, &dpp);
-		mdp4_hsic_update(pipe);
-	}
-
 	mdp4_stat.overlay_unset[pipe->mixer_num]++;
 
 	mdp4_overlay_pipe_free(pipe);
@@ -2861,6 +2920,36 @@
 	return 0;
 }
 
+int mdp4_overlay_wait4vsync(struct fb_info *info, long long *vtime)
+{
+	if (info->node == 0) {
+		if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+			mdp4_dsi_video_wait4vsync(0, vtime);
+		else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+			mdp4_dsi_cmd_wait4vsync(0, vtime);
+		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+			mdp4_lcdc_wait4vsync(0, vtime);
+	} else if (info->node == 1)
+		mdp4_dtv_wait4vsync(0, vtime);
+
+	return 0;
+}
+
+int mdp4_overlay_vsync_ctrl(struct fb_info *info, int enable)
+{
+	if (info->node == 0) {
+		if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO)
+			mdp4_dsi_video_vsync_ctrl(0, enable);
+		else if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD)
+			mdp4_dsi_cmd_vsync_ctrl(0, enable);
+		else if (ctrl->panel_mode & MDP4_PANEL_LCDC)
+			mdp4_lcdc_vsync_ctrl(0, enable);
+	} else if (info->node == 1)
+		mdp4_dtv_vsync_ctrl(0, enable);
+
+	return 0;
+}
+
 
 struct tile_desc {
 	uint32 width;  /* tile's width */
@@ -2900,37 +2989,39 @@
 
 int mdp4_overlay_play_wait(struct fb_info *info, struct msmfb_overlay_data *req)
 {
-	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
-	struct mdp4_overlay_pipe *pipe;
-
-	if (mfd == NULL)
-		return -ENODEV;
-
-	if (!mfd->panel_power_on) /* suspended */
-		return -EPERM;
-
-	pipe = mdp4_overlay_ndx2pipe(req->id);
-
-	if (!pipe) {
-		mdp4_stat.err_play++;
-		return -ENODEV;
-	}
-
-	if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
-		return -EINTR;
-
-	mdp4_mixer_stage_commit(pipe->mixer_num);
-
-	if (mfd->use_ov1_blt)
-		mdp4_overlay1_update_blt_mode(mfd);
-
-	mdp4_overlay_dtv_wait4vsync();
-	mdp4_iommu_unmap(pipe);
-
-	mutex_unlock(&mfd->dma->ov_mutex);
 	return 0;
 }
 
+/*
+ * mdp4_overlay_dma_commit: called from dma_done isr
+ * No mutex/sleep allowed
+ */
+void mdp4_overlay_dma_commit(int mixer)
+{
+	/*
+	 * non-double-buffered register updates go here;
+	 * perf level and new clock rate changes should be done here
+	 */
+}
+
+/*
+ * mdp4_overlay_vsync_commit: called from tasklet context
+ * No mutex/sleep allowed
+ */
+void mdp4_overlay_vsync_commit(struct mdp4_overlay_pipe *pipe)
+{
+	if (pipe->pipe_type == OVERLAY_TYPE_VIDEO)
+		mdp4_overlay_vg_setup(pipe);	/* video/graphic pipe */
+	else
+		mdp4_overlay_rgb_setup(pipe);	/* rgb pipe */
+
+	pr_debug("%s: pipe=%x ndx=%d num=%d used=%d\n", __func__,
+		(int) pipe, pipe->pipe_ndx, pipe->pipe_num, pipe->pipe_used);
+
+	mdp4_overlay_reg_flush(pipe, 1);
+	mdp4_mixer_stage_up(pipe);
+}
+
 int mdp4_overlay_play(struct fb_info *info, struct msmfb_overlay_data *req)
 {
 	struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
@@ -2958,20 +3049,18 @@
 		return -ENODEV;
 	}
 
-	if (mutex_lock_interruptible(&mfd->dma->ov_mutex))
-		return -EINTR;
-
 	if (pipe->pipe_type == OVERLAY_TYPE_BF) {
 		mdp4_overlay_borderfill_stage_up(pipe);
-		mutex_unlock(&mfd->dma->ov_mutex);
 		return 0;
 	}
 
+	if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+		mutex_lock(&mfd->dma->ov_mutex);
+
 	img = &req->data;
 	get_img(img, info, pipe, 0, &start, &len, &srcp0_file,
 		&ps0_need, &srcp0_ihdl);
 	if (len == 0) {
-		mutex_unlock(&mfd->dma->ov_mutex);
 		pr_err("%s: pmem Error\n", __func__);
 		ret = -1;
 		goto end;
@@ -2982,8 +3071,9 @@
 	pipe->srcp0_ystride = pipe->src_width * pipe->bpp;
 
 
-	pr_debug("%s: mixer=%d ndx=%x addr=%x flags=%x\n", __func__,
-		pipe->mixer_num, pipe->pipe_ndx, (int)addr, pipe->flags);
+	pr_debug("%s: mixer=%d ndx=%x addr=%x flags=%x pid=%d\n", __func__,
+		pipe->mixer_num, pipe->pipe_ndx, (int)addr, pipe->flags,
+							current->pid);
 
 	if ((req->version_key & VERSION_KEY_MASK) == 0xF9E8D700)
 		overlay_version = (req->version_key & ~VERSION_KEY_MASK);
@@ -2994,7 +3084,6 @@
 			get_img(img, info, pipe, 1, &start, &len, &srcp1_file,
 				&p_need, &srcp1_ihdl);
 			if (len == 0) {
-				mutex_unlock(&mfd->dma->ov_mutex);
 				pr_err("%s: Error to get plane1\n", __func__);
 				ret = -EINVAL;
 				goto end;
@@ -3026,7 +3115,6 @@
 			get_img(img, info, pipe, 1, &start, &len, &srcp1_file,
 				&p_need, &srcp1_ihdl);
 			if (len == 0) {
-				mutex_unlock(&mfd->dma->ov_mutex);
 				pr_err("%s: Error to get plane1\n", __func__);
 				ret = -EINVAL;
 				goto end;
@@ -3037,7 +3125,6 @@
 			get_img(img, info, pipe, 2, &start, &len, &srcp2_file,
 				&p_need, &srcp2_ihdl);
 			if (len == 0) {
-				mutex_unlock(&mfd->dma->ov_mutex);
 				pr_err("%s: Error to get plane2\n", __func__);
 				ret = -EINVAL;
 				goto end;
@@ -3081,16 +3168,33 @@
 	if (mfd->use_ov1_blt)
 		mdp4_overlay1_update_blt_mode(mfd);
 
+	if (ctrl->panel_mode & MDP4_PANEL_MDDI)
+		goto mddi;
+
+	if (pipe->mixer_num == MDP4_MIXER0) {
+		if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
+			/* cndx = 0 */
+			mdp4_dsi_cmd_pipe_queue(0, pipe);
+		}
+		if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
+			/* cndx = 0 */
+			mdp4_dsi_video_pipe_queue(0, pipe);
+		} else if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
+			/* cndx = 0 */
+			mdp4_lcdc_pipe_queue(0, pipe);
+		}
+	} else if (pipe->mixer_num == MDP4_MIXER1) {
+		if (ctrl->panel_mode & MDP4_PANEL_DTV)
+			mdp4_dtv_pipe_queue(0, pipe);/* cndx = 0 */
+	}
+
+	return ret;
+
+mddi:
 
 	if (pipe->pipe_type == OVERLAY_TYPE_VIDEO) {
-		mdp4_overlay_vg_setup(pipe);	/* video/graphic pipe */
+		mdp4_overlay_vg_setup(pipe);    /* video/graphic pipe */
 	} else {
-		if (pipe->flags & MDP_SHARPENING) {
-			pr_debug(
-			"%s: Sharpening/Smoothing not supported on RGB pipe\n",
-								     __func__);
-			pipe->flags &= ~MDP_SHARPENING;
-		}
 		mdp4_overlay_rgb_setup(pipe);	/* rgb pipe */
 	}
 
@@ -3102,73 +3206,21 @@
 	}
 
 	mdp4_mixer_stage_up(pipe);
+	if (!(pipe->flags & MDP_OV_PLAY_NOWAIT))
+		mdp4_mixer_stage_commit(pipe->mixer_num);
 
-	if (pipe->mixer_num == MDP4_MIXER2) {
-		ctrl->mixer2_played++;
-#ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
-		if (ctrl->panel_mode & MDP4_PANEL_WRITEBACK) {
-			mdp4_writeback_dma_busy_wait(mfd);
-			mdp4_writeback_kickoff_video(mfd, pipe);
-		}
-#endif
-	} else if (pipe->mixer_num == MDP4_MIXER1) {
-		ctrl->mixer1_played++;
-		/* enternal interface */
-		if (ctrl->panel_mode & MDP4_PANEL_DTV) {
-			if (pipe->flags & MDP_OV_PLAY_NOWAIT)
-				mdp4_overlay_flush_piggyback(MDP4_MIXER0,
-							MDP4_MIXER1);
-			mdp4_overlay_dtv_start();
-			mdp4_overlay_dtv_ov_done_push(mfd, pipe);
-			if (!mfd->use_ov1_blt)
-				mdp4_overlay1_update_blt_mode(mfd);
-		}
-	} else {
 
-		/* primary interface */
-		ctrl->mixer0_played++;
-		if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
-			mdp4_overlay_lcdc_start();
-			mdp4_overlay_lcdc_vsync_push(mfd, pipe);
-			if (!mfd->use_ov0_blt &&
-					!(pipe->flags & MDP_OV_PLAY_NOWAIT))
-				mdp4_overlay_update_blt_mode(mfd);
-		}
-#ifdef CONFIG_FB_MSM_MIPI_DSI
-		else if (ctrl->panel_mode & MDP4_PANEL_DSI_VIDEO) {
-			mdp4_overlay_dsi_video_start();
-			mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
-			if (!mfd->use_ov0_blt &&
-					!(pipe->flags & MDP_OV_PLAY_NOWAIT))
-				mdp4_overlay_update_blt_mode(mfd);
-		}
-#endif
-		else {
-			mdp4_overlay_reg_flush_reset(pipe);
-			/* mddi & mipi dsi cmd mode */
-			if (pipe->flags & MDP_OV_PLAY_NOWAIT) {
-				mdp4_stat.overlay_play[pipe->mixer_num]++;
-				mutex_unlock(&mfd->dma->ov_mutex);
-				goto end;
-			}
-#ifdef CONFIG_FB_MSM_MIPI_DSI
-			if (ctrl->panel_mode & MDP4_PANEL_DSI_CMD) {
-				mdp4_iommu_attach();
-				mdp4_dsi_cmd_dma_busy_wait(mfd);
-				mdp4_dsi_cmd_kickoff_video(mfd, pipe);
-			}
-#else
-			if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
-				mdp4_mddi_dma_busy_wait(mfd);
-				mdp4_mddi_kickoff_video(mfd, pipe);
-			}
-#endif
-		}
+	if (pipe->flags & MDP_OV_PLAY_NOWAIT) {
+		mdp4_stat.overlay_play[pipe->mixer_num]++;
+		mutex_unlock(&mfd->dma->ov_mutex);
+		goto end;
 	}
 
-	/* write out DPP HSIC registers */
-	if (pipe->flags & MDP_DPP_HSIC)
-		mdp4_hsic_update(pipe);
+	if (ctrl->panel_mode & MDP4_PANEL_MDDI) {
+		mdp4_mddi_dma_busy_wait(mfd);
+		mdp4_mddi_kickoff_video(mfd, pipe);
+	}
+
 	if (!(pipe->flags & MDP_OV_PLAY_NOWAIT))
 		mdp4_iommu_unmap(pipe);
 	mdp4_stat.overlay_play[pipe->mixer_num]++;
@@ -3315,6 +3367,7 @@
 
 void mdp4_v4l2_overlay_clear(struct mdp4_overlay_pipe *pipe)
 {
+	mdp4_overlay_reg_flush(pipe, 1);
 	mdp4_mixer_stage_down(pipe);
 	mdp4_overlay_pipe_free(pipe);
 }
@@ -3380,6 +3433,10 @@
 
 	mdp4_mixer_stage_up(pipe);
 
+#ifdef V4L2_VSYNC
+	/*
+	 * TODO: incorporate v4l2 into vsync driven mechanism
+	 */
 	if (ctrl->panel_mode & MDP4_PANEL_LCDC) {
 		mdp4_overlay_lcdc_vsync_push(mfd, pipe);
 	} else {
@@ -3395,6 +3452,8 @@
 		}
 #endif
 	}
+#endif
+
 done:
 	mutex_unlock(&mfd->dma->ov_mutex);
 	return err;
diff --git a/drivers/video/msm/mdp4_overlay_dsi_cmd.c b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
index d0ac1a6..7998d8b 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_cmd.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_cmd.c
@@ -17,7 +17,6 @@
 #include <linux/time.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/hrtimer.h>
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <linux/semaphore.h>
@@ -32,18 +31,525 @@
 #include "mdp4.h"
 #include "mipi_dsi.h"
 
-static struct mdp4_overlay_pipe *dsi_pipe;
-static struct msm_fb_data_type *dsi_mfd;
-static int busy_wait_cnt;
 static int dsi_state;
-static unsigned long  tout_expired;
 
 #define TOUT_PERIOD	HZ	/* 1 second */
 #define MS_100		(HZ/10)	/* 100 ms */
 
 static int vsync_start_y_adjust = 4;
 
-struct timer_list dsi_clock_timer;
+#define MAX_CONTROLLER	1
+#define VSYNC_EXPIRE_TICK 2
+#define BACKLIGHT_MAX 4
+
+struct backlight {
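+/*
+ * Pending backlight levels are queued in a small ring buffer and
+ * drained at commit time by mdp4_backlight_commit_level(), so the
+ * DSI backlight command is only sent when mipi_dsi_ctrl_lock()
+ * permits it.
+ */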
+	int put;
+	int get;
+	int tot;
+	int blist[BACKLIGHT_MAX];
+};
+
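+/*
+ * Per-controller vsync state: a double-buffered update list (vlist),
+ * rdptr/dmap interrupt counters, completion and waiter bookkeeping,
+ * and the work item used to report vsync timestamps to userspace
+ * via uevent.
+ */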
+static struct vsycn_ctrl {
+	struct device *dev;
+	int inited;
+	int update_ndx;
+	int expire_tick;
+	uint32 dmap_intr_tot;
+	uint32 rdptr_intr_tot;
+	uint32 rdptr_sirq_tot;
+	atomic_t suspend;
+	int dmap_wait_cnt;
+	int wait_vsync_cnt;
+	int commit_cnt;
+	struct mutex update_lock;
+	struct completion dmap_comp;
+	struct completion vsync_comp;
+	spinlock_t dmap_spin_lock;
+	spinlock_t spin_lock;
+	struct mdp4_overlay_pipe *base_pipe;
+	struct vsync_update vlist[2];
+	struct backlight blight;
+	int vsync_irq_enabled;
+	ktime_t vsync_time;
+	struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+static void vsync_irq_enable(int intr, int term)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	/* no need to clear other interrupts for command mode */
+	outp32(MDP_INTR_CLEAR, INTR_PRIMARY_RDPTR);
+	mdp_intr_mask |= intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_enable_irq(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+static void vsync_irq_disable(int intr, int term)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	/* no need to clear other interrupts for command mode */
+	mdp_intr_mask &= ~intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_disable_irq_nosync(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+}
+
+static int mdp4_backlight_get_level(struct vsycn_ctrl *vctrl)
+{
+	int level = -1;
+
+	mutex_lock(&vctrl->update_lock);
+	if (vctrl->blight.tot) {
+		level = vctrl->blight.blist[vctrl->blight.get];
+		vctrl->blight.get++;
+		vctrl->blight.get %= BACKLIGHT_MAX;
+		vctrl->blight.tot--;
+		pr_debug("%s: tot=%d put=%d get=%d level=%d\n", __func__,
+		vctrl->blight.tot, vctrl->blight.put, vctrl->blight.get, level);
+	}
+	mutex_unlock(&vctrl->update_lock);
+	return level;
+}
+
+void mdp4_backlight_put_level(int cndx, int level)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	mutex_lock(&vctrl->update_lock);
+	vctrl->blight.blist[vctrl->blight.put] = level;
+	vctrl->blight.put++;
+	vctrl->blight.put %= BACKLIGHT_MAX;
+	if (vctrl->blight.tot == BACKLIGHT_MAX) {
+		/* drop the oldest one */
+		vctrl->blight.get++;
+		vctrl->blight.get %= BACKLIGHT_MAX;
+	} else {
+		vctrl->blight.tot++;
+	}
+	mutex_unlock(&vctrl->update_lock);
+	pr_debug("%s: tot=%d put=%d get=%d level=%d\n", __func__,
+		vctrl->blight.tot, vctrl->blight.put, vctrl->blight.get, level);
+
+	if (mdp4_overlay_dsi_state_get() <= ST_DSI_SUSPEND)
+		return;
+}
+
+static int mdp4_backlight_commit_level(struct vsycn_ctrl *vctrl)
+{
+	int level;
+	int cnt = 0;
+
+	if (vctrl->blight.tot) { /* has new backlight */
+		if (mipi_dsi_ctrl_lock(0)) {
+			level = mdp4_backlight_get_level(vctrl);
+			mipi_dsi_cmd_backlight_tx(level);
+			cnt++;
+		}
+	}
+
+	return cnt;
+}
+
+void mdp4_blt_dmap_cfg(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 off, addr;
+	int bpp;
+
+	if (pipe->ov_blt_addr == 0)
+		return;
+
+#ifdef BLT_RGB565
+	bpp = 2; /* overlay output is RGB565 */
+#else
+	bpp = 3; /* overlay output is RGB888 */
+#endif
+	off = 0;
+	if (pipe->blt_dmap_done & 0x01)
+		off = pipe->src_height * pipe->src_width * bpp;
+	addr = pipe->dma_blt_addr + off;
+
+	/* dmap */
+	MDP_OUTP(MDP_BASE + 0x90008, addr);
+}
+
+
+void mdp4_blt_overlay0_cfg(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 off, addr;
+	int bpp;
+	char *overlay_base;
+
+	if (pipe->ov_blt_addr == 0)
+		return;
+
+#ifdef BLT_RGB565
+	bpp = 2; /* overlay output is RGB565 */
+#else
+	bpp = 3; /* overlay output is RGB888 */
+#endif
+	off = 0;
+	if (pipe->blt_ov_done & 0x01)
+		off = pipe->src_height * pipe->src_width * bpp;
+	addr = pipe->ov_blt_addr + off;
+	/* overlay 0 */
+	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
+	outpdw(overlay_base + 0x000c, addr);
+	outpdw(overlay_base + 0x001c, addr);
+}
+
+static void vsync_commit_kickoff_dmap(struct mdp4_overlay_pipe *pipe)
+{
+	if (mipi_dsi_ctrl_lock(1)) {
+		mdp4_stat.kickoff_dmap++;
+		pipe->blt_dmap_koff++;
+		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+		outpdw(MDP_BASE + 0x000c, 0); /* kickoff dmap engine */
+		mb();
+	}
+}
+
+static void vsync_commit_kickoff_ov0(struct mdp4_overlay_pipe *pipe, int blt)
+{
+	int locked = 1;
+
+	if (blt)
+		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+	else
+		locked = mipi_dsi_ctrl_lock(1);
+
+	if (locked) {
+		mdp4_stat.kickoff_ov0++;
+		pipe->blt_ov_koff++;
+		outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
+		mb();
+	}
+}
+
+/*
+ * mdp4_dsi_cmd_pipe_queue:
+ * called from thread context
+ */
+void mdp4_dsi_cmd_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pp;
+	int undx;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+
+	pp = &vp->plist[pipe->pipe_ndx - 1];	/* ndx starts from 1 */
+
+	pr_debug("%s: vndx=%d pipe_ndx=%d expire=%x pid=%d\n", __func__,
+		undx, pipe->pipe_ndx, vctrl->expire_tick, current->pid);
+
+	*pp = *pipe;	/* keep it */
+	vp->update_cnt++;
+
+	if (vctrl->expire_tick == 0) {
+		mipi_dsi_clk_cfg(1);
+		mdp_clk_ctrl(1);
+		vsync_irq_enable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
+	}
+	vctrl->expire_tick = VSYNC_EXPIRE_TICK;
+	mutex_unlock(&vctrl->update_lock);
+}
+
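+/*
+ * mdp4_dsi_cmd_pipe_commit:
+ * swap the double-buffered update list, push any queued backlight
+ * level, program the queued pipes and kick off the overlay engine;
+ * with blt enabled the dmap kickoff follows from the overlay0 done isr.
+ */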
+int mdp4_dsi_cmd_pipe_commit(void)
+{
+
+	int  i, undx, cnt;
+	int mixer = 0;
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+	int diff;
+
+	vctrl = &vsync_ctrl_db[0];
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+	pipe = vctrl->base_pipe;
+	mixer = pipe->mixer_num;
+
+	pr_debug("%s: vndx=%d cnt=%d expire=%x pid=%d\n", __func__,
+		undx, vp->update_cnt, vctrl->expire_tick, current->pid);
+
+	cnt = 0;
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return cnt;
+	}
+	vctrl->update_ndx++;
+	vctrl->update_ndx &= 0x01;
+	vctrl->commit_cnt++;
+	vp->update_cnt = 0;	/* reset */
+	mutex_unlock(&vctrl->update_lock);
+
+	mdp4_backlight_commit_level(vctrl);
+
+	/* free previous committed iommu back to pool */
+	mdp4_overlay_iommu_unmap_freelist(mixer);
+
+	pipe = vp->plist;
+	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+		if (pipe->pipe_used) {
+			cnt++;
+			mdp4_overlay_vsync_commit(pipe);
+			/* free previous iommu to freelist
+			 * which will be freed at next
+			 * pipe_commit
+			 */
+			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+			pipe->pipe_used = 0; /* clear */
+		}
+	}
+	mdp4_mixer_stage_commit(mixer);
+
+
+	pr_debug("%s: intr=%d expire=%d cpu=%d\n", __func__,
+		vctrl->rdptr_intr_tot, vctrl->expire_tick, smp_processor_id());
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	pipe = vctrl->base_pipe;
+	if (pipe->blt_changed) {
+		/* blt configuration changed */
+		pipe->blt_changed = 0;
+		mdp4_overlayproc_cfg(pipe);
+		mdp4_overlay_dmap_xy(pipe);
+	}
+
+	if (pipe->ov_blt_addr) {
+		diff = pipe->blt_ov_koff - pipe->blt_ov_done;
+		if (diff < 1) {
+			mdp4_blt_overlay0_cfg(pipe);
+			vsync_commit_kickoff_ov0(pipe, 1);
+		}
+	} else {
+		vsync_commit_kickoff_ov0(pipe, 0);
+	}
+
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	return cnt;
+}
+
+void mdp4_dsi_cmd_vsync_ctrl(int cndx, int enable)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (vctrl->vsync_irq_enabled == enable)
+		return;
+
+	vctrl->vsync_irq_enabled = enable;
+
+	mutex_lock(&vctrl->update_lock);
+	if (enable) {
+		mipi_dsi_clk_cfg(1);
+		mdp_clk_ctrl(1);
+		vsync_irq_enable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
+	} else {
+		mipi_dsi_clk_cfg(0);
+		mdp_clk_ctrl(0);
+		vsync_irq_disable(INTR_PRIMARY_RDPTR, MDP_PRIM_RDPTR_TERM);
+		vctrl->expire_tick = 0;
+	}
+	mutex_unlock(&vctrl->update_lock);
+}
+
+void mdp4_dsi_cmd_wait4vsync(int cndx, long long *vtime)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (vctrl->wait_vsync_cnt == 0)
+		INIT_COMPLETION(vctrl->vsync_comp);
+	vctrl->wait_vsync_cnt++;
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	wait_for_completion(&vctrl->vsync_comp);
+
+	*vtime = ktime_to_ns(vctrl->vsync_time);
+}
+
+
+/*
+ * primary_rdptr_isr:
+ * called from interrupt context
+ */
+
+static void primary_rdptr_isr(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+	vctrl->rdptr_intr_tot++;
+	vctrl->vsync_time = ktime_get();
+	schedule_work(&vctrl->vsync_work);
+}
+
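+/*
+ * mdp4_dmap_done_dsi_cmd: called from isr when DMA_P completes.
+ * With blt disabled this only runs the dma commit hook; with blt
+ * enabled it balances dmap completions against overlay done counts
+ * and finishes blt teardown once the last writeback frame is done.
+ */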
+void mdp4_dmap_done_dsi_cmd(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	int diff;
+
+	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+
+	vctrl = &vsync_ctrl_db[cndx];
+	vctrl->dmap_intr_tot++;
+	pipe = vctrl->base_pipe;
+
+	if (pipe->ov_blt_addr == 0) {
+		mdp4_overlay_dma_commit(cndx);
+		return;
+	}
+
+	 /* blt enabled */
+	spin_lock(&vctrl->spin_lock);
+	pipe->blt_dmap_done++;
+	diff = pipe->blt_ov_done - pipe->blt_dmap_done;
+	spin_unlock(&vctrl->spin_lock);
+	pr_debug("%s: ov_done=%d dmap_done=%d ov_koff=%d dmap_koff=%d\n",
+			__func__, pipe->blt_ov_done, pipe->blt_dmap_done,
+				pipe->blt_ov_koff, pipe->blt_dmap_koff);
+	if (diff <= 0) {
+		if (pipe->blt_end) {
+			pipe->blt_end = 0;
+			pipe->ov_blt_addr = 0;
+			pipe->dma_blt_addr = 0;
+			pipe->blt_changed = 1;
+			pr_info("%s: BLT-END\n", __func__);
+		}
+	}
+}
+
+/*
+ * mdp4_overlay0_done_dsi_cmd: called from isr
+ */
+void mdp4_overlay0_done_dsi_cmd(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	int diff;
+
+	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	spin_lock(&vctrl->spin_lock);
+	pipe->blt_ov_done++;
+	diff = pipe->blt_ov_done - pipe->blt_dmap_done;
+	spin_unlock(&vctrl->spin_lock);
+
+	pr_debug("%s: ov_done=%d dmap_done=%d ov_koff=%d dmap_koff=%d diff=%d\n",
+			__func__, pipe->blt_ov_done, pipe->blt_dmap_done,
+			pipe->blt_ov_koff, pipe->blt_dmap_koff, diff);
+
+	if (pipe->ov_blt_addr == 0) {
+		/* blt disabled */
+		pr_debug("%s: NON-BLT\n", __func__);
+		return;
+	}
+
+	if (diff == 1) {
+		mdp4_blt_dmap_cfg(pipe);
+		vsync_commit_kickoff_dmap(pipe);
+	}
+}
+
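+/* report the latest vsync timestamp to userspace as a "VSYNC=<ns>" uevent */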
+static void send_vsync_work(struct work_struct *work)
+{
+	struct vsycn_ctrl *vctrl =
+		container_of(work, typeof(*vctrl), vsync_work);
+	char buf[64];
+	char *envp[2];
+
+	snprintf(buf, sizeof(buf), "VSYNC=%llu",
+			ktime_to_ns(vctrl->vsync_time));
+	envp[0] = buf;
+	envp[1] = NULL;
+	kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+
+void mdp4_dsi_rdptr_init(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->inited)
+		return;
+
+	vctrl->inited = 1;
+	vctrl->update_ndx = 0;
+	vctrl->blight.put = 0;
+	vctrl->blight.get = 0;
+	vctrl->blight.tot = 0;
+	mutex_init(&vctrl->update_lock);
+	init_completion(&vctrl->vsync_comp);
+	init_completion(&vctrl->dmap_comp);
+	spin_lock_init(&vctrl->spin_lock);
+	spin_lock_init(&vctrl->dmap_spin_lock);
+	INIT_WORK(&vctrl->vsync_work, send_vsync_work);
+}
+
+void mdp4_primary_rdptr(void)
+{
+	primary_rdptr_isr(0);
+}
 
 void mdp4_overlay_dsi_state_set(int state)
 {
@@ -59,18 +565,6 @@
 	return dsi_state;
 }
 
-static void dsi_clock_tout(unsigned long data)
-{
-	spin_lock(&dsi_clk_lock);
-	if (mipi_dsi_clk_on) {
-		if (dsi_state == ST_DSI_PLAYING) {
-			mipi_dsi_turn_off_clks();
-			mdp4_overlay_dsi_state_set(ST_DSI_CLK_OFF);
-		}
-	}
-	spin_unlock(&dsi_clk_lock);
-}
-
 static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
 {
 	/*
@@ -86,11 +580,6 @@
 		return xres * bpp;
 }
 
-void mdp4_dsi_cmd_del_timer(void)
-{
-	del_timer_sync(&dsi_clock_timer);
-}
-
 void mdp4_mipi_vsync_enable(struct msm_fb_data_type *mfd,
 		struct mdp4_overlay_pipe *pipe, int which)
 {
@@ -121,67 +610,98 @@
 	}
 }
 
-void mdp4_dsi_cmd_base_swap(struct mdp4_overlay_pipe *pipe)
+void mdp4_dsi_cmd_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
 {
-	dsi_pipe = pipe;
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	vctrl->base_pipe = pipe;
+}
+
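+/*
+ * Point the base-layer pipe at the whole framebuffer: either the 3D
+ * side-by-side geometry or the plain 2D xres/yres of the panel.
+ */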
+static void mdp4_overlay_setup_pipe_addr(struct msm_fb_data_type *mfd,
+			struct mdp4_overlay_pipe *pipe)
+{
+	MDPIBUF *iBuf = &mfd->ibuf;
+	struct fb_info *fbi;
+	int bpp;
+	uint8 *src;
+
+	/* whole screen for base layer */
+	src = (uint8 *) iBuf->buf;
+	fbi = mfd->fbi;
+
+	if (pipe->is_3d) {
+		bpp = fbi->var.bits_per_pixel / 8;
+		pipe->src_height = pipe->src_height_3d;
+		pipe->src_width = pipe->src_width_3d;
+		pipe->src_h = pipe->src_height_3d;
+		pipe->src_w = pipe->src_width_3d;
+		pipe->dst_h = pipe->src_height_3d;
+		pipe->dst_w = pipe->src_width_3d;
+		pipe->srcp0_ystride = msm_fb_line_length(0,
+						pipe->src_width, bpp);
+	} else {
+		 /* 2D */
+		pipe->src_height = fbi->var.yres;
+		pipe->src_width = fbi->var.xres;
+		pipe->src_h = fbi->var.yres;
+		pipe->src_w = fbi->var.xres;
+		pipe->dst_h = fbi->var.yres;
+		pipe->dst_w = fbi->var.xres;
+		pipe->srcp0_ystride = fbi->fix.line_length;
+	}
+	pipe->src_y = 0;
+	pipe->src_x = 0;
+	pipe->dst_y = 0;
+	pipe->dst_x = 0;
+	pipe->srcp0_addr = (uint32)src;
 }
 
 void mdp4_overlay_update_dsi_cmd(struct msm_fb_data_type *mfd)
 {
-	MDPIBUF *iBuf = &mfd->ibuf;
-	uint8 *src;
 	int ptype;
 	struct mdp4_overlay_pipe *pipe;
-	int bpp;
 	int ret;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+
 
 	if (mfd->key != MFD_KEY)
 		return;
 
-	dsi_mfd = mfd;		/* keep it */
+	vctrl = &vsync_ctrl_db[cndx];
 
 	/* MDP cmd block enable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	mdp_clk_ctrl(1);
 
-	if (dsi_pipe == NULL) {
-		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
-		if (ptype < 0)
-			printk(KERN_INFO "%s: format2type failed\n", __func__);
-		pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
-		if (pipe == NULL)
-			printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
-		pipe->pipe_used++;
-		pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
-		pipe->mixer_num  = MDP4_MIXER0;
-		pipe->src_format = mfd->fb_imgType;
-		mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_CMD);
-		ret = mdp4_overlay_format2pipe(pipe);
-		if (ret < 0)
-			printk(KERN_INFO "%s: format2type failed\n", __func__);
-
-		init_timer(&dsi_clock_timer);
-		dsi_clock_timer.function = dsi_clock_tout;
-		dsi_clock_timer.data = (unsigned long) mfd;;
-		dsi_clock_timer.expires = 0xffffffff;
-		add_timer(&dsi_clock_timer);
-		tout_expired = jiffies;
-
-		dsi_pipe = pipe; /* keep it */
-
-		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
-		pipe->ov_blt_addr = 0;
-		pipe->dma_blt_addr = 0;
-
-	} else {
-		pipe = dsi_pipe;
-	}
-
-	if (pipe->pipe_used == 0 ||
-			pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
-		pr_err("%s: NOT baselayer\n", __func__);
-		mutex_unlock(&mfd->dma->ov_mutex);
+	ptype = mdp4_overlay_format2type(mfd->fb_imgType);
+	if (ptype < 0)
+		printk(KERN_INFO "%s: format2type failed\n", __func__);
+	pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
+	if (pipe == NULL) {
+		printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
 		return;
 	}
+	pipe->pipe_used++;
+	pipe->mixer_stage  = MDP4_MIXER_STAGE_BASE;
+	pipe->mixer_num  = MDP4_MIXER0;
+	pipe->src_format = mfd->fb_imgType;
+	mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_CMD);
+	ret = mdp4_overlay_format2pipe(pipe);
+	if (ret < 0)
+		printk(KERN_INFO "%s: format2type failed\n", __func__);
+
+	vctrl->base_pipe = pipe; /* keep it */
+	mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
+	pipe->ov_blt_addr = 0;
+	pipe->dma_blt_addr = 0;
+
+	MDP_OUTP(MDP_BASE + 0x021c, 0x10); /* read pointer */
 
 	/*
 	 * configure dsi stream id
@@ -190,41 +710,8 @@
 	MDP_OUTP(MDP_BASE + 0x000a0, 0x10);
 	/* disable dsi trigger */
 	MDP_OUTP(MDP_BASE + 0x000a4, 0x00);
-	/* whole screen for base layer */
-	src = (uint8 *) iBuf->buf;
 
-
-	{
-		struct fb_info *fbi;
-
-		fbi = mfd->fbi;
-		if (pipe->is_3d) {
-			bpp = fbi->var.bits_per_pixel / 8;
-			pipe->src_height = pipe->src_height_3d;
-			pipe->src_width = pipe->src_width_3d;
-			pipe->src_h = pipe->src_height_3d;
-			pipe->src_w = pipe->src_width_3d;
-			pipe->dst_h = pipe->src_height_3d;
-			pipe->dst_w = pipe->src_width_3d;
-			pipe->srcp0_ystride = msm_fb_line_length(0,
-						pipe->src_width, bpp);
-		} else {
-			 /* 2D */
-			pipe->src_height = fbi->var.yres;
-			pipe->src_width = fbi->var.xres;
-			pipe->src_h = fbi->var.yres;
-			pipe->src_w = fbi->var.xres;
-			pipe->dst_h = fbi->var.yres;
-			pipe->dst_w = fbi->var.xres;
-			pipe->srcp0_ystride = fbi->fix.line_length;
-		}
-		pipe->src_y = 0;
-		pipe->src_x = 0;
-		pipe->dst_y = 0;
-		pipe->dst_x = 0;
-		pipe->srcp0_addr = (uint32)src;
-	}
-
+	mdp4_overlay_setup_pipe_addr(mfd, pipe);
 
 	mdp4_overlay_rgb_setup(pipe);
 
@@ -238,10 +725,8 @@
 
 	mdp4_overlay_dmap_cfg(mfd, 0);
 
-	mdp4_mipi_vsync_enable(mfd, pipe, 0);
-
 	/* MDP cmd block disable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	mdp_clk_ctrl(0);
 
 	wmb();
 }
@@ -251,18 +736,18 @@
 				struct msmfb_overlay_3d *r3d)
 {
 	struct fb_info *fbi;
-	struct mdp4_overlay_pipe *pipe;
 	int bpp;
 	uint8 *src = NULL;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
-	if (dsi_pipe == NULL)
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	if (pipe == NULL)
 		return;
 
-	dsi_pipe->is_3d = r3d->is_3d;
-	dsi_pipe->src_height_3d = r3d->height;
-	dsi_pipe->src_width_3d = r3d->width;
-
-	pipe = dsi_pipe;
 	if (pipe->pipe_used == 0 ||
 			pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
 		pr_err("%s: NOT baselayer\n", __func__);
@@ -270,16 +755,15 @@
 		return;
 	}
 
+	pipe->is_3d = r3d->is_3d;
+	pipe->src_height_3d = r3d->height;
+	pipe->src_width_3d = r3d->width;
+
 	if (pipe->is_3d)
 		mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_SIDE_BY_SIDE);
 	else
 		mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_NONE);
 
-	if (mfd->panel_power_on) {
-		mdp4_dsi_cmd_dma_busy_wait(mfd);
-		mdp4_dsi_blt_dmap_busy_wait(mfd);
-	}
-
 	fbi = mfd->fbi;
 	if (pipe->is_3d) {
 		bpp = fbi->var.bits_per_pixel / 8;
@@ -328,28 +812,38 @@
 int mdp4_dsi_overlay_blt_start(struct msm_fb_data_type *mfd)
 {
 	unsigned long flag;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
-	pr_debug("%s: blt_end=%d ov_blt_addr=%x pid=%d\n",
-	__func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr, current->pid);
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	pr_debug("%s: blt_end=%d blt_addr=%x pid=%d\n",
+		 __func__, pipe->blt_end, (int)pipe->ov_blt_addr, current->pid);
 
 	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
 
 	if (mfd->ov0_wb_buf->write_addr == 0) {
-		pr_info("%s: no blt_base assigned\n", __func__);
+		pr_err("%s: no blt_base assigned\n", __func__);
 		return -EBUSY;
 	}
 
-	if (dsi_pipe->ov_blt_addr == 0) {
-		mdp4_dsi_cmd_dma_busy_wait(mfd);
-		spin_lock_irqsave(&mdp_spin_lock, flag);
-		dsi_pipe->blt_end = 0;
-		dsi_pipe->blt_cnt = 0;
-		dsi_pipe->ov_cnt = 0;
-		dsi_pipe->dmap_cnt = 0;
-		dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
-		dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+	if (pipe->ov_blt_addr == 0) {
+		spin_lock_irqsave(&vctrl->spin_lock, flag);
+		pipe->blt_end = 0;
+		pipe->blt_cnt = 0;
+		pipe->blt_changed = 1;
+		pipe->ov_cnt = 0;
+		pipe->dmap_cnt = 0;
+		pipe->blt_ov_koff = 0;
+		pipe->blt_dmap_koff = 0;
+		pipe->blt_ov_done = 0;
+		pipe->blt_dmap_done = 0;
+		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
 		mdp4_stat.blt_dsi_cmd++;
-		spin_unlock_irqrestore(&mdp_spin_lock, flag);
+		spin_unlock_irqrestore(&vctrl->spin_lock, flag);
 		return 0;
 	}
 
@@ -359,32 +853,26 @@
 int mdp4_dsi_overlay_blt_stop(struct msm_fb_data_type *mfd)
 {
 	unsigned long flag;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
 
-	pr_debug("%s: blt_end=%d ov_blt_addr=%x\n",
-		 __func__, dsi_pipe->blt_end, (int)dsi_pipe->ov_blt_addr);
+	pr_info("%s: blt_end=%d blt_addr=%x pid=%d\n",
+		 __func__, pipe->blt_end, (int)pipe->ov_blt_addr, current->pid);
 
-	if ((dsi_pipe->blt_end == 0) && dsi_pipe->ov_blt_addr) {
-		spin_lock_irqsave(&mdp_spin_lock, flag);
-		dsi_pipe->blt_end = 1;	/* mark as end */
-		spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	if ((pipe->blt_end == 0) && pipe->ov_blt_addr) {
+		spin_lock_irqsave(&vctrl->spin_lock, flag);
+		pipe->blt_end = 1;	/* mark as end */
+		spin_unlock_irqrestore(&vctrl->spin_lock, flag);
 		return 0;
 	}
 
 	return -EBUSY;
 }
 
-int mdp4_dsi_overlay_blt_offset(struct msm_fb_data_type *mfd,
-					struct msmfb_overlay_blt *req)
-{
-	req->offset = 0;
-	req->width = dsi_pipe->src_width;
-	req->height = dsi_pipe->src_height;
-	req->bpp = dsi_pipe->bpp;
-
-	return sizeof(*req);
-}
-
 void mdp4_dsi_overlay_blt(struct msm_fb_data_type *mfd,
 					struct msmfb_overlay_blt *req)
 {
@@ -395,332 +883,123 @@
 
 }
 
-void mdp4_blt_xy_update(struct mdp4_overlay_pipe *pipe)
+int mdp4_dsi_cmd_on(struct platform_device *pdev)
 {
-	uint32 off, addr, addr2;
-	int bpp;
-	char *overlay_base;
+	int ret = 0;
+	int cndx = 0;
+	struct msm_fb_data_type *mfd;
+	struct vsycn_ctrl *vctrl;
 
+	pr_info("%s+:\n", __func__);
 
-	if (pipe->ov_blt_addr == 0)
-		return;
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
+	vctrl = &vsync_ctrl_db[cndx];
+	vctrl->dev = mfd->fbi->dev;
 
-#ifdef BLT_RGB565
-	bpp = 2; /* overlay ouput is RGB565 */
-#else
-	bpp = 3; /* overlay ouput is RGB888 */
-#endif
-	off = 0;
-	if (pipe->dmap_cnt & 0x01)
-		off = pipe->src_height * pipe->src_width * bpp;
-	addr = pipe->dma_blt_addr + off;
+	mdp_clk_ctrl(1);
 
-	/* dmap */
-	MDP_OUTP(MDP_BASE + 0x90008, addr);
-
-	off = 0;
-	if (pipe->ov_cnt & 0x01)
-		off = pipe->src_height * pipe->src_width * bpp;
-	addr2 = pipe->ov_blt_addr + off;
-	/* overlay 0 */
-	overlay_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;/* 0x10000 */
-	outpdw(overlay_base + 0x000c, addr2);
-	outpdw(overlay_base + 0x001c, addr2);
-}
-
-
-/*
- * mdp4_dmap_done_dsi: called from isr
- * DAM_P_DONE only used when blt enabled
- */
-void mdp4_dma_p_done_dsi(struct mdp_dma_data *dma)
-{
-	int diff;
-
-	dsi_pipe->dmap_cnt++;
-	diff = dsi_pipe->ov_cnt - dsi_pipe->dmap_cnt;
-	pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
-			__func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
-
-	if (diff <= 0) {
-		spin_lock(&mdp_spin_lock);
-		dma->dmap_busy = FALSE;
-		complete(&dma->dmap_comp);
-		spin_unlock(&mdp_spin_lock);
-		if (dsi_pipe->blt_end) {
-			dsi_pipe->blt_end = 0;
-			dsi_pipe->dma_blt_addr = 0;
-			dsi_pipe->ov_blt_addr = 0;
-			pr_debug("%s: END, ov_cnt=%d dmap_cnt=%d\n",
-				__func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
-			mdp_intr_mask &= ~INTR_DMA_P_DONE;
-			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-		}
-		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-		mdp_disable_irq_nosync(MDP_DMA2_TERM);  /* disable intr */
-		return;
-	}
-
-	spin_lock(&mdp_spin_lock);
-	dma->busy = FALSE;
-	spin_unlock(&mdp_spin_lock);
-	complete(&dma->comp);
-	if (busy_wait_cnt)
-		busy_wait_cnt--;
-
-	pr_debug("%s: kickoff dmap\n", __func__);
-
-	mdp4_blt_xy_update(dsi_pipe);
-	/* kick off dmap */
-	outpdw(MDP_BASE + 0x000c, 0x0);
-	mdp4_stat.kickoff_dmap++;
-	/* trigger dsi cmd engine */
-	mipi_dsi_cmd_mdp_start();
-
-	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-}
-
-
-/*
- * mdp4_overlay0_done_dsi_cmd: called from isr
- */
-void mdp4_overlay0_done_dsi_cmd(struct mdp_dma_data *dma)
-{
-	int diff;
-
-	if (dsi_pipe->ov_blt_addr == 0) {
-		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
-		spin_lock(&mdp_spin_lock);
-		dma->busy = FALSE;
-		spin_unlock(&mdp_spin_lock);
-		complete(&dma->comp);
-		if (busy_wait_cnt)
-			busy_wait_cnt--;
-		mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-		return;
-	}
-
-	/* blt enabled */
-	if (dsi_pipe->blt_end == 0)
-		dsi_pipe->ov_cnt++;
-
-	pr_debug("%s: ov_cnt=%d dmap_cnt=%d\n",
-			__func__, dsi_pipe->ov_cnt, dsi_pipe->dmap_cnt);
-
-	if (dsi_pipe->blt_cnt == 0) {
-		/* first kickoff since blt enabled */
-		mdp_intr_mask |= INTR_DMA_P_DONE;
-		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-	}
-	dsi_pipe->blt_cnt++;
-
-	diff = dsi_pipe->ov_cnt - dsi_pipe->dmap_cnt;
-	if (diff >= 2) {
-		mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-		return;
-	}
-
-	spin_lock(&mdp_spin_lock);
-	dma->busy = FALSE;
-	dma->dmap_busy = TRUE;
-	spin_unlock(&mdp_spin_lock);
-	complete(&dma->comp);
-	if (busy_wait_cnt)
-		busy_wait_cnt--;
-
-	pr_debug("%s: kickoff dmap\n", __func__);
-
-	mdp4_blt_xy_update(dsi_pipe);
-	mdp_enable_irq(MDP_DMA2_TERM);	/* enable intr */
-	/* kick off dmap */
-	outpdw(MDP_BASE + 0x000c, 0x0);
-	mdp4_stat.kickoff_dmap++;
-	/* trigger dsi cmd engine */
-	mipi_dsi_cmd_mdp_start();
-	mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-}
-
-void mdp4_dsi_cmd_overlay_restore(void)
-{
-	/* mutex holded by caller */
-	if (dsi_mfd && dsi_pipe) {
-		mdp4_dsi_cmd_dma_busy_wait(dsi_mfd);
-		mipi_dsi_mdp_busy_wait(dsi_mfd);
-		mdp4_overlay_update_dsi_cmd(dsi_mfd);
-
-		if (dsi_pipe->ov_blt_addr)
-			mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
-		mdp4_dsi_cmd_overlay_kickoff(dsi_mfd, dsi_pipe);
-	}
-}
-
-void mdp4_dsi_blt_dmap_busy_wait(struct msm_fb_data_type *mfd)
-{
-	unsigned long flag;
-	int need_wait = 0;
-
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (mfd->dma->dmap_busy == TRUE) {
-		INIT_COMPLETION(mfd->dma->dmap_comp);
-		need_wait++;
-	}
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
-	if (need_wait) {
-		/* wait until DMA finishes the current job */
-		wait_for_completion(&mfd->dma->dmap_comp);
-	}
-}
-
-/*
- * mdp4_dsi_cmd_dma_busy_wait: check dsi link activity
- * dsi link is a shared resource and it can only be used
- * while it is in idle state.
- * ov_mutex need to be acquired before call this function.
- */
-void mdp4_dsi_cmd_dma_busy_wait(struct msm_fb_data_type *mfd)
-{
-	unsigned long flag;
-	int need_wait = 0;
-
-
-
-	if (dsi_clock_timer.function) {
-		if (time_after(jiffies, tout_expired)) {
-			tout_expired = jiffies + TOUT_PERIOD;
-			mod_timer(&dsi_clock_timer, tout_expired);
-			tout_expired -= MS_100;
-		}
-	}
-
-	pr_debug("%s: start pid=%d dsi_clk_on=%d\n",
-			__func__, current->pid, mipi_dsi_clk_on);
-
-	/* satrt dsi clock if necessary */
-	spin_lock_bh(&dsi_clk_lock);
-	if (mipi_dsi_clk_on == 0)
-		mipi_dsi_turn_on_clks();
-	spin_unlock_bh(&dsi_clk_lock);
-
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (mfd->dma->busy == TRUE) {
-		if (busy_wait_cnt == 0)
-			INIT_COMPLETION(mfd->dma->comp);
-		busy_wait_cnt++;
-		need_wait++;
-	}
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
-	if (need_wait) {
-		/* wait until DMA finishes the current job */
-		pr_debug("%s: pending pid=%d dsi_clk_on=%d\n",
-				__func__, current->pid, mipi_dsi_clk_on);
-		wait_for_completion(&mfd->dma->comp);
-	}
-	pr_debug("%s: done pid=%d dsi_clk_on=%d\n",
-			 __func__, current->pid, mipi_dsi_clk_on);
-}
-
-void mdp4_dsi_cmd_kickoff_video(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
-{
-	/*
-	 * a video kickoff may happen before UI kickoff after
-	 * blt enabled. mdp4_overlay_update_dsi_cmd() need
-	 * to be called before kickoff.
-	 * vice versa for blt disabled.
-	 */
-	if (dsi_pipe->ov_blt_addr && dsi_pipe->blt_cnt == 0)
-		mdp4_overlay_update_dsi_cmd(mfd); /* first time */
-	else if (dsi_pipe->ov_blt_addr == 0  && dsi_pipe->blt_cnt) {
-		mdp4_overlay_update_dsi_cmd(mfd); /* last time */
-		dsi_pipe->blt_cnt = 0;
-	}
-
-	pr_debug("%s: ov_blt_addr=%d blt_cnt=%d\n",
-		__func__, (int)dsi_pipe->ov_blt_addr, dsi_pipe->blt_cnt);
-
-	if (dsi_pipe->ov_blt_addr)
-		mdp4_dsi_blt_dmap_busy_wait(dsi_mfd);
-
-	mdp4_dsi_cmd_overlay_kickoff(mfd, pipe);
-}
-
-void mdp4_dsi_cmd_kickoff_ui(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
-{
-
-	pr_debug("%s: pid=%d\n", __func__, current->pid);
-	mdp4_dsi_cmd_overlay_kickoff(mfd, pipe);
-}
-
-
-void mdp4_dsi_cmd_overlay_kickoff(struct msm_fb_data_type *mfd,
-				struct mdp4_overlay_pipe *pipe)
-{
-	unsigned long flag;
+	if (vctrl->base_pipe == NULL)
+		mdp4_overlay_update_dsi_cmd(mfd);
 
 	mdp4_iommu_attach();
-	/* change mdp clk */
-	mdp4_set_perf_level();
 
-	mipi_dsi_mdp_busy_wait(mfd);
+	atomic_set(&vctrl->suspend, 0);
+	pr_info("%s-:\n", __func__);
 
-	if (dsi_pipe->ov_blt_addr == 0)
-		mipi_dsi_cmd_mdp_start();
 
-	mdp4_overlay_dsi_state_set(ST_DSI_PLAYING);
+	return ret;
+}
 
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	mdp_enable_irq(MDP_OVERLAY0_TERM);
-	mfd->dma->busy = TRUE;
-	if (dsi_pipe->ov_blt_addr)
-		mfd->dma->dmap_busy = TRUE;
-	/* start OVERLAY pipe */
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-	mdp_pipe_kickoff(MDP_OVERLAY0_TERM, mfd);
-	mdp4_stat.kickoff_ov0++;
+int mdp4_dsi_cmd_off(struct platform_device *pdev)
+{
+	int ret = 0;
+	int cndx = 0;
+	struct msm_fb_data_type *mfd;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+
+	pr_info("%s+:\n", __func__);
+
+	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+	if (pipe == NULL) {
+		pr_err("%s: NO base pipe\n", __func__);
+		return ret;
+	}
+
+	atomic_set(&vctrl->suspend, 1);
+
+	/* make sure dsi clk is on so that
+	 * at panel_next_off() dsi panel can be shut off
+	 */
+	mipi_dsi_ahb_ctrl(1);
+	mipi_dsi_clk_enable();
+
+	mdp4_mixer_stage_down(pipe);
+	mdp4_overlay_pipe_free(pipe);
+	vctrl->base_pipe = NULL;
+
+	pr_info("%s-:\n", __func__);
+
+	/*
+	 * footswitch off
+	 * this will cause all mdp registers
+	 * to be reset to their defaults
+	 * after footswitch on later
+	 */
+
+	return ret;
 }
 
 void mdp_dsi_cmd_overlay_suspend(struct msm_fb_data_type *mfd)
 {
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
 	/* dis-engage rgb0 from mixer0 */
-	if (dsi_pipe) {
+	if (pipe) {
 		if (mfd->ref_cnt == 0) {
 			/* adb stop */
-			if (dsi_pipe->pipe_type == OVERLAY_TYPE_BF)
-				mdp4_overlay_borderfill_stage_down(dsi_pipe);
+			if (pipe->pipe_type == OVERLAY_TYPE_BF)
+				mdp4_overlay_borderfill_stage_down(pipe);
 
-			/* dsi_pipe == rgb1 */
-			mdp4_overlay_unset_mixer(dsi_pipe->mixer_num);
-			dsi_pipe = NULL;
+			/* pipe == rgb1 */
+			mdp4_overlay_unset_mixer(pipe->mixer_num);
+			vctrl->base_pipe = NULL;
 		} else {
-			mdp4_mixer_stage_down(dsi_pipe);
-			mdp4_iommu_unmap(dsi_pipe);
+			mdp4_mixer_stage_down(pipe);
+			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 1);
 		}
 	}
 }
 
 void mdp4_dsi_cmd_overlay(struct msm_fb_data_type *mfd)
 {
-	mutex_lock(&mfd->dma->ov_mutex);
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
-	if (mfd && mfd->panel_power_on) {
-		mdp4_dsi_cmd_dma_busy_wait(mfd);
+	vctrl = &vsync_ctrl_db[cndx];
 
-		if (dsi_pipe && dsi_pipe->ov_blt_addr)
-			mdp4_dsi_blt_dmap_busy_wait(mfd);
+	if (!mfd->panel_power_on)
+		return;
 
-		mdp4_overlay_update_dsi_cmd(mfd);
-
-		mdp4_dsi_cmd_kickoff_ui(mfd, dsi_pipe);
-		mdp4_iommu_unmap(dsi_pipe);
-	/* signal if pan function is waiting for the update completion */
-		if (mfd->pan_waiting) {
-			mfd->pan_waiting = FALSE;
-			complete(&mfd->pan_comp);
-		}
+	pipe = vctrl->base_pipe;
+	if (pipe == NULL) {
+		pr_err("%s: NO base pipe\n", __func__);
+		return;
 	}
-	mutex_unlock(&mfd->dma->ov_mutex);
+
+	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE) {
+		mdp4_mipi_vsync_enable(mfd, pipe, 0);
+		mdp4_overlay_setup_pipe_addr(mfd, pipe);
+		mdp4_dsi_cmd_pipe_queue(0, pipe);
+	}
+	mdp4_dsi_cmd_pipe_commit();
 }
diff --git a/drivers/video/msm/mdp4_overlay_dsi_video.c b/drivers/video/msm/mdp4_overlay_dsi_video.c
index bc4476e..398b1e6 100644
--- a/drivers/video/msm/mdp4_overlay_dsi_video.c
+++ b/drivers/video/msm/mdp4_overlay_dsi_video.c
@@ -24,6 +24,9 @@
 #include <linux/spinlock.h>
 #include <linux/fb.h>
 #include <linux/msm_mdp.h>
+#include <linux/ktime.h>
+#include <linux/wakelock.h>
+#include <linux/time.h>
 #include <asm/system.h>
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -40,38 +43,362 @@
 static int first_pixel_start_y;
 static int dsi_video_enabled;
 
-static struct mdp4_overlay_pipe *dsi_pipe;
-static struct completion dsi_video_comp;
-static int blt_cfg_changed;
+#define MAX_CONTROLLER	1
 
-static cmd_fxn_t display_on;
+static struct vsycn_ctrl {
+	struct device *dev;
+	int inited;
+	int update_ndx;
+	int ov_koff;
+	int ov_done;
+	atomic_t suspend;
+	int wait_vsync_cnt;
+	int blt_change;
+	int blt_free;
+	int fake_vsync;
+	struct mutex update_lock;
+	struct completion ov_comp;
+	struct completion dmap_comp;
+	struct completion vsync_comp;
+	spinlock_t spin_lock;
+	struct msm_fb_data_type *mfd;
+	struct mdp4_overlay_pipe *base_pipe;
+	struct vsync_update vlist[2];
+	int vsync_irq_enabled;
+	ktime_t vsync_time;
+	struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
 
-static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
+static void vsync_irq_enable(int intr, int term)
 {
-	/*
-	 * The adreno GPU hardware requires that the pitch be aligned to
-	 * 32 pixels for color buffers, so for the cases where the GPU
-	 * is writing directly to fb0, the framebuffer pitch
-	 * also needs to be 32 pixel aligned
-	 */
+	unsigned long flag;
 
-	if (fb_index == 0)
-		return ALIGN(xres, 32) * bpp;
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	outp32(MDP_INTR_CLEAR,
+		INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+	mdp_intr_mask |= intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_enable_irq(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
+}
+
+static void vsync_irq_disable(int intr, int term)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	outp32(MDP_INTR_CLEAR,
+		INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+	mdp_intr_mask &= ~intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_disable_irq_nosync(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
+}
+
+static void mdp4_overlay_dsi_video_start(void)
+{
+	if (!dsi_video_enabled) {
+		/* enable DSI block */
+		mdp4_iommu_attach();
+		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
+		dsi_video_enabled = 1;
+	}
+}
+
+/*
+ * mdp4_dsi_video_pipe_queue:
+ * called from thread context
+ */
+void mdp4_dsi_video_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pp;
+	int undx;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	/* start timing generator & mmu if they are not started yet */
+	mdp4_overlay_dsi_video_start();
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+
+	pp = &vp->plist[pipe->pipe_ndx - 1];	/* ndx starts from 1 */
+
+	pr_debug("%s: vndx=%d pipe=%x ndx=%d num=%d pid=%d\n",
+		 __func__, undx, (int)pipe, pipe->pipe_ndx, pipe->pipe_num,
+		current->pid);
+
+	*pp = *pipe;	/* keep it */
+	vp->update_cnt++;
+	mutex_unlock(&vctrl->update_lock);
+	mdp4_stat.overlay_play[pipe->mixer_num]++;
+}
+
+static void mdp4_dsi_video_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+static void mdp4_dsi_video_wait4dmap(int cndx);
+static void mdp4_dsi_video_wait4ov(int cndx);
+
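+/*
+ * mdp4_dsi_video_pipe_commit:
+ * swap the update list and program the queued pipes; with blt enabled
+ * the overlay engine is kicked here, otherwise the second phase of
+ * the update is scheduled at dmap done.
+ */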
+int mdp4_dsi_video_pipe_commit(void)
+{
+
+	int  i, undx;
+	int mixer = 0;
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+	int cnt = 0;
+
+	vctrl = &vsync_ctrl_db[0];
+
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+	pipe = vctrl->base_pipe;
+	mixer = pipe->mixer_num;
+
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return cnt;
+	}
+
+	vctrl->update_ndx++;
+	vctrl->update_ndx &= 0x01;
+	vp->update_cnt = 0;     /* reset */
+	if (vctrl->blt_free) {
+		vctrl->blt_free--;
+		if (vctrl->blt_free == 0)
+			mdp4_free_writeback_buf(vctrl->mfd, mixer);
+	}
+	mutex_unlock(&vctrl->update_lock);
+
+	/* free previous committed iommu back to pool */
+	mdp4_overlay_iommu_unmap_freelist(mixer);
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (vctrl->ov_koff != vctrl->ov_done) {
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+		pr_err("%s: Error, frame dropped %d %d\n", __func__,
+				vctrl->ov_koff, vctrl->ov_done);
+		return 0;
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	if (vctrl->blt_change) {
+		pipe = vctrl->base_pipe;
+		spin_lock_irqsave(&vctrl->spin_lock, flags);
+		INIT_COMPLETION(vctrl->dmap_comp);
+		INIT_COMPLETION(vctrl->ov_comp);
+		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+		mdp4_dsi_video_wait4dmap(0);
+		if (pipe->ov_blt_addr)
+			mdp4_dsi_video_wait4ov(0);
+	}
+
+	pipe = vp->plist;
+	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+		if (pipe->pipe_used) {
+			cnt++;
+			mdp4_overlay_vsync_commit(pipe);
+			/* free previous iommu to freelist
+			 * which will be freed at next
+			 * pipe_commit
+			 */
+			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+			pipe->pipe_used = 0; /* clear */
+		}
+	}
+
+	mdp4_mixer_stage_commit(mixer);
+
+	pipe = vctrl->base_pipe;
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (pipe->ov_blt_addr) {
+		mdp4_dsi_video_blt_ov_update(pipe);
+		pipe->ov_cnt++;
+		INIT_COMPLETION(vctrl->ov_comp);
+		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+		mb();
+		vctrl->ov_koff++;
+		/* kickoff overlay engine */
+		mdp4_stat.kickoff_ov0++;
+		outpdw(MDP_BASE + 0x0004, 0);
+	} else {
+		/* schedule second phase update at dmap */
+		INIT_COMPLETION(vctrl->dmap_comp);
+		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	mdp4_stat.overlay_commit[pipe->mixer_num]++;
+
+	return cnt;
+}
+
+void mdp4_dsi_video_vsync_ctrl(int cndx, int enable)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (vctrl->fake_vsync) {
+		vctrl->fake_vsync = 0;
+		schedule_work(&vctrl->vsync_work);
+	}
+
+	if (vctrl->vsync_irq_enabled == enable)
+		return;
+
+	pr_debug("%s: vsync enable=%d\n", __func__, enable);
+
+	vctrl->vsync_irq_enabled = enable;
+
+	if (enable)
+		vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
 	else
-		return xres * bpp;
+		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
 }
 
-void mdp4_dsi_video_fxn_register(cmd_fxn_t fxn)
+void mdp4_dsi_video_wait4vsync(int cndx, long long *vtime)
 {
-	display_on = fxn;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	if (atomic_read(&vctrl->suspend) > 0) {
+		*vtime = -1;
+		return;
+	}
+
+	/* start timing generator & mmu if they are not started yet */
+	mdp4_overlay_dsi_video_start();
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (vctrl->wait_vsync_cnt == 0)
+		INIT_COMPLETION(vctrl->vsync_comp);
+
+	vctrl->wait_vsync_cnt++;
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	wait_for_completion(&vctrl->vsync_comp);
+	mdp4_stat.wait4vsync0++;
+
+	*vtime = ktime_to_ns(vctrl->vsync_time);
 }
 
-static void mdp4_overlay_dsi_video_wait4event(struct msm_fb_data_type *mfd,
-						int intr_done);
-
-void mdp4_dsi_video_base_swap(struct mdp4_overlay_pipe *pipe)
+static void mdp4_dsi_video_wait4dmap(int cndx)
 {
-	dsi_pipe = pipe;
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->dmap_comp);
+}
+
+static void mdp4_dsi_video_wait4ov(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->ov_comp);
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+	struct vsycn_ctrl *vctrl =
+			container_of(work, typeof(*vctrl), vsync_work);
+	char buf[64];
+	char *envp[2];
+
+	snprintf(buf, sizeof(buf), "VSYNC=%llu",
+			ktime_to_ns(vctrl->vsync_time));
+	envp[0] = buf;
+	envp[1] = NULL;
+	kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+void mdp4_dsi_vsync_init(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	pr_info("%s: ndx=%d\n", __func__, cndx);
+
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->inited)
+		return;
+
+	vctrl->inited = 1;
+	vctrl->update_ndx = 0;
+	mutex_init(&vctrl->update_lock);
+	init_completion(&vctrl->vsync_comp);
+	init_completion(&vctrl->dmap_comp);
+	init_completion(&vctrl->ov_comp);
+	atomic_set(&vctrl->suspend, 0);
+	spin_lock_init(&vctrl->spin_lock);
+	INIT_WORK(&vctrl->vsync_work, send_vsync_work);
+}
+
+void mdp4_dsi_video_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	vctrl->base_pipe = pipe;
 }
 
 int mdp4_dsi_video_on(struct platform_device *pdev)
@@ -113,8 +440,11 @@
 	struct fb_var_screeninfo *var;
 	struct msm_fb_data_type *mfd;
 	struct mdp4_overlay_pipe *pipe;
-	int ret;
+	int ret = 0;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
 
+	vctrl = &vsync_ctrl_db[cndx];
 	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
 	if (!mfd)
@@ -123,6 +453,12 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
+	vctrl->mfd = mfd;
+	vctrl->dev = mfd->fbi->dev;
+
+	/* mdp clock on */
+	mdp_clk_ctrl(1);
+
 	fbi = mfd->fbi;
 	var = &fbi->var;
 
@@ -130,7 +466,7 @@
 	buf = (uint8 *) fbi->fix.smem_start;
 	buf_offset = calc_fb_offset(mfd, fbi, bpp);
 
-	if (dsi_pipe == NULL) {
+	if (vctrl->base_pipe == NULL) {
 		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
 		if (ptype < 0)
 			printk(KERN_INFO "%s: format2type failed\n", __func__);
@@ -148,17 +484,16 @@
 		if (ret < 0)
 			printk(KERN_INFO "%s: format2type failed\n", __func__);
 
-		dsi_pipe = pipe; /* keep it */
-		init_completion(&dsi_video_comp);
-
-		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
 		pipe->ov_blt_addr = 0;
 		pipe->dma_blt_addr = 0;
+		vctrl->base_pipe = pipe; /* keep it */
+		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
 
 	} else {
-		pipe = dsi_pipe;
+		pipe = vctrl->base_pipe;
 	}
 
+#ifdef CONTINUOUS_SPLASH
 	/* MDP cmd block enable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 
@@ -171,12 +506,7 @@
 		MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
 		mipi_dsi_controller_cfg(0);
 	}
-
-	if (is_mdp4_hw_reset()) {
-		mdp4_hw_init();
-		outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
-	}
-
+#endif
 	pipe->src_height = fbi->var.yres;
 	pipe->src_width = fbi->var.xres;
 	pipe->src_h = fbi->var.yres;
@@ -194,13 +524,16 @@
 	pipe->dst_h = fbi->var.yres;
 	pipe->dst_w = fbi->var.xres;
 
+	atomic_set(&vctrl->suspend, 0);
+
 	mdp4_overlay_dmap_xy(pipe);	/* dma_p */
 	mdp4_overlay_dmap_cfg(mfd, 1);
 	mdp4_overlay_rgb_setup(pipe);
+	mdp4_overlayproc_cfg(pipe);
+
 	mdp4_overlay_reg_flush(pipe, 1);
 	mdp4_mixer_stage_up(pipe);
 
-	mdp4_overlayproc_cfg(pipe);
 
 	/*
 	 * DSI timing setting
@@ -261,6 +594,7 @@
 	ctrl_polarity =
 	    (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
 
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x4, hsync_ctrl);
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x8, vsync_period * hsync_period);
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0xc,
@@ -275,82 +609,97 @@
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x2c, dsi_underflow_clr);
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x30, dsi_hsync_skew);
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x38, ctrl_polarity);
-	mdp4_overlay_reg_flush(pipe, 1);
-	mdp4_mixer_stage_up(pipe);
-
-	mdp_histogram_ctrl_all(TRUE);
-
-	ret = panel_next_on(pdev);
-	if (ret == 0) {
-		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-		if (display_on != NULL) {
-			msleep(50);
-			display_on(pdev);
-		}
-	}
-	/* MDP cmd block disable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
+	mdp_histogram_ctrl_all(TRUE);
 	return ret;
 }
 
 int mdp4_dsi_video_off(struct platform_device *pdev)
 {
 	int ret = 0;
+	int cndx = 0;
 	struct msm_fb_data_type *mfd;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
 	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
 
-	/* MDP cmd block enable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	atomic_set(&vctrl->suspend, 1);
+
+	while (vctrl->wait_vsync_cnt)
+		msleep(20);	/* >= 17 ms */
+
 	MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0);
+
 	dsi_video_enabled = 0;
-	/* MDP cmd block disable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
 	mdp_histogram_ctrl_all(FALSE);
-	ret = panel_next_off(pdev);
 
-	/* delay to make sure the last frame finishes */
-	msleep(20);
-
-	/* dis-engage rgb0 from mixer0 */
-	if (dsi_pipe) {
+	if (pipe) {
 		if (mfd->ref_cnt == 0) {
 			/* adb stop */
-			if (dsi_pipe->pipe_type == OVERLAY_TYPE_BF)
-				mdp4_overlay_borderfill_stage_down(dsi_pipe);
+			if (pipe->pipe_type == OVERLAY_TYPE_BF)
+				mdp4_overlay_borderfill_stage_down(pipe);
 
-			/* dsi_pipe == rgb1 */
-			mdp4_overlay_unset_mixer(dsi_pipe->mixer_num);
-			dsi_pipe = NULL;
+			mdp4_overlay_unset_mixer(pipe->mixer_num);
+			vctrl->base_pipe = NULL;
 		} else {
-			mdp4_mixer_stage_down(dsi_pipe);
-			mdp4_iommu_unmap(dsi_pipe);
+			/* system suspending */
+			mdp4_mixer_stage_down(vctrl->base_pipe);
+			mdp4_overlay_iommu_pipe_free(
+				vctrl->base_pipe->pipe_ndx, 1);
 		}
 	}
 
+	vctrl->fake_vsync = 1;
+
+	/* mdp clock off */
+	mdp_clk_ctrl(0);
+	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+
 	return ret;
 }
 
+static __u32 msm_fb_line_length(__u32 fb_index, __u32 xres, int bpp)
+{
+	/*
+	 * The adreno GPU hardware requires that the pitch be aligned to
+	 * 32 pixels for color buffers, so for the cases where the GPU
+	 * is writing directly to fb0, the framebuffer pitch
+	 * also needs to be 32 pixel aligned
+	 */
+
+	if (fb_index == 0)
+		return ALIGN(xres, 32) * bpp;
+	else
+		return xres * bpp;
+}
+
 /* 3D side by side */
 void mdp4_dsi_video_3d_sbys(struct msm_fb_data_type *mfd,
 				struct msmfb_overlay_3d *r3d)
 {
 	struct fb_info *fbi;
-	struct mdp4_overlay_pipe *pipe;
 	unsigned int buf_offset;
 	int bpp;
 	uint8 *buf = NULL;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
-	if (dsi_pipe == NULL)
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	if (vctrl->base_pipe == NULL)
 		return;
 
-	dsi_pipe->is_3d = r3d->is_3d;
-	dsi_pipe->src_height_3d = r3d->height;
-	dsi_pipe->src_width_3d = r3d->width;
-
-	pipe = dsi_pipe;
+	pipe = vctrl->base_pipe;
+	pipe->is_3d = r3d->is_3d;
+	pipe->src_height_3d = r3d->height;
+	pipe->src_width_3d = r3d->width;
 
 	if (pipe->is_3d)
 		mdp4_overlay_panel_3d(pipe->mixer_num, MDP4_3D_SIDE_BY_SIDE);
@@ -402,12 +751,10 @@
 	mdp4_overlay_dmap_cfg(mfd, 1);
 
 	mdp4_overlay_reg_flush(pipe, 1);
+
 	mdp4_mixer_stage_up(pipe);
 
 	mb();
-
-	/* wait for vsycn */
-	mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
 }
 
 static void mdp4_dsi_video_blt_ov_update(struct mdp4_overlay_pipe *pipe)
@@ -416,11 +763,9 @@
 	int bpp;
 	char *overlay_base;
 
-
 	if (pipe->ov_blt_addr == 0)
 		return;
 
-
 #ifdef BLT_RGB565
 	bpp = 2; /* overlay ouput is RGB565 */
 #else
@@ -460,156 +805,98 @@
 	MDP_OUTP(MDP_BASE + 0x90008, addr);
 }
 
-/*
- * mdp4_overlay_dsi_video_wait4event:
- * INTR_DMA_P_DONE and INTR_PRIMARY_VSYNC event only
- * no INTR_OVERLAY0_DONE event allowed.
- */
-static void mdp4_overlay_dsi_video_wait4event(struct msm_fb_data_type *mfd,
-						int intr_done)
+void mdp4_overlay_dsi_video_set_perf(struct msm_fb_data_type *mfd)
 {
-	unsigned long flag;
-	unsigned int data;
-
-	data = inpdw(MDP_BASE + DSI_VIDEO_BASE);
-	data &= 0x01;
-	if (data == 0)	/* timing generator disabled */
-		return;
-
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	INIT_COMPLETION(dsi_video_comp);
-	mfd->dma->waiting = TRUE;
-	outp32(MDP_INTR_CLEAR, intr_done);
-	mdp_intr_mask |= intr_done;
-	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-	mdp_enable_irq(MDP_DMA2_TERM);  /* enable intr */
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-	wait_for_completion(&dsi_video_comp);
-	mdp_disable_irq(MDP_DMA2_TERM);
-}
-
-static void mdp4_overlay_dsi_video_dma_busy_wait(struct msm_fb_data_type *mfd)
-{
-	unsigned long flag;
-	int need_wait = 0;
-
-	pr_debug("%s: start pid=%d\n", __func__, current->pid);
-
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (mfd->dma->busy == TRUE) {
-		INIT_COMPLETION(mfd->dma->comp);
-		need_wait++;
-	}
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
-	if (need_wait) {
-		/* wait until DMA finishes the current job */
-		pr_debug("%s: pending pid=%d\n", __func__, current->pid);
-		wait_for_completion(&mfd->dma->comp);
-	}
-	pr_debug("%s: done pid=%d\n", __func__, current->pid);
-}
-
-void mdp4_overlay_dsi_video_start(void)
-{
-	if (!dsi_video_enabled) {
-		/* enable DSI block */
-		mdp4_iommu_attach();
-		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-		MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1);
-		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-		dsi_video_enabled = 1;
-	}
-}
-
-void mdp4_overlay_dsi_video_vsync_push(struct msm_fb_data_type *mfd,
-			struct mdp4_overlay_pipe *pipe)
-{
-	unsigned long flag;
-
-	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
-		return;
-
-	if (dsi_pipe->ov_blt_addr) {
-		mdp4_overlay_dsi_video_dma_busy_wait(mfd);
-
-		mdp4_dsi_video_blt_ov_update(dsi_pipe);
-		dsi_pipe->ov_cnt++;
-
-		spin_lock_irqsave(&mdp_spin_lock, flag);
-		outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
-		mdp_intr_mask |= INTR_OVERLAY0_DONE;
-		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-		mdp_enable_irq(MDP_OVERLAY0_TERM);
-		mfd->dma->busy = TRUE;
-		mb();	/* make sure all registers updated */
-		spin_unlock_irqrestore(&mdp_spin_lock, flag);
-		outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
-		mdp4_stat.kickoff_ov0++;
-		mb();
-		mdp4_overlay_dsi_video_wait4event(mfd, INTR_DMA_P_DONE);
-	} else {
-		mdp4_overlay_dsi_video_wait4event(mfd, INTR_PRIMARY_VSYNC);
-	}
-
+	/* change mdp clk while mdp is idle */
 	mdp4_set_perf_level();
 }
 
+
 /*
  * mdp4_primary_vsync_dsi_video: called from isr
  */
 void mdp4_primary_vsync_dsi_video(void)
 {
-	complete_all(&dsi_video_comp);
+	int cndx;
+	struct vsycn_ctrl *vctrl;
+
+	cndx = 0;
+	vctrl = &vsync_ctrl_db[cndx];
+	pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+	vctrl->vsync_time = ktime_get();
+	schedule_work(&vctrl->vsync_work);
+
+	spin_lock(&vctrl->spin_lock);
+	if (vctrl->wait_vsync_cnt) {
+		complete_all(&vctrl->vsync_comp);
+		vctrl->wait_vsync_cnt = 0;
+	}
+	spin_unlock(&vctrl->spin_lock);
 }
 
  /*
- * mdp4_dma_p_done_dsi_video: called from isr
+ * mdp4_dmap_done_dsi_video: called from isr
  */
-void mdp4_dma_p_done_dsi_video(struct mdp_dma_data *dma)
+void mdp4_dmap_done_dsi_video(int cndx)
 {
-	if (blt_cfg_changed) {
-		mdp_is_in_isr = TRUE;
-		if (dsi_pipe->ov_blt_addr) {
-			mdp4_overlay_dmap_xy(dsi_pipe);
-			mdp4_overlayproc_cfg(dsi_pipe);
-		} else {
-			mdp4_overlayproc_cfg(dsi_pipe);
-			mdp4_overlay_dmap_xy(dsi_pipe);
-		}
-		mdp_is_in_isr = FALSE;
-		if (dsi_pipe->ov_blt_addr) {
-			mdp4_dsi_video_blt_ov_update(dsi_pipe);
-			dsi_pipe->ov_cnt++;
-			outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
-			mdp_intr_mask |= INTR_OVERLAY0_DONE;
-			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-			dma->busy = TRUE;
-			mdp_enable_irq(MDP_OVERLAY0_TERM);
-			/* kickoff overlay engine */
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
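+	/*
+	 * Apply a pending writeback (BLT) mode switch while DMA_P is idle:
+	 * reprogram the overlay processor and DMAP fetch, and in BLT mode
+	 * prime one overlay0 pass so the writeback buffer holds a full
+	 * frame before DMAP starts reading from it.
+	 */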
+	if (vctrl->blt_change) {
+		mdp4_overlayproc_cfg(pipe);
+		mdp4_overlay_dmap_xy(pipe);
+		if (pipe->ov_blt_addr) {
+			mdp4_dsi_video_blt_ov_update(pipe);
+			pipe->ov_cnt++;
+			/* Prefill one frame */
+			vsync_irq_enable(INTR_OVERLAY0_DONE,
+						MDP_OVERLAY0_TERM);
+			/* kickoff overlay0 engine */
+			mdp4_stat.kickoff_ov0++;
+			vctrl->ov_koff++;	/* make up for prefill */
 			outpdw(MDP_BASE + 0x0004, 0);
 		}
-		blt_cfg_changed = 0;
+		vctrl->blt_change = 0;
 	}
-	complete_all(&dsi_video_comp);
+
+	complete_all(&vctrl->dmap_comp);
+	mdp4_overlay_dma_commit(cndx);
+	spin_unlock(&vctrl->spin_lock);
 }
 
 /*
- * mdp4_overlay1_done_dsi: called from isr
+ * mdp4_overlay0_done_dsi_video: called from isr
  */
-void mdp4_overlay0_done_dsi_video(struct mdp_dma_data *dma)
+void mdp4_overlay0_done_dsi_video(int cndx)
 {
-	spin_lock(&mdp_spin_lock);
-	dma->busy = FALSE;
-	if (dsi_pipe->ov_blt_addr == 0) {
-		spin_unlock(&mdp_spin_lock);
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+	vctrl->ov_done++;
+	complete_all(&vctrl->ov_comp);
+	if (pipe->ov_blt_addr == 0) {
+		spin_unlock(&vctrl->spin_lock);
 		return;
 	}
-	mdp4_dsi_video_blt_dmap_update(dsi_pipe);
-	dsi_pipe->dmap_cnt++;
-	mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-	spin_unlock(&mdp_spin_lock);
-	complete(&dma->comp);
+
+	mdp4_dsi_video_blt_dmap_update(pipe);
+	pipe->dmap_cnt++;
+	spin_unlock(&vctrl->spin_lock);
 }
 
 /*
@@ -619,8 +906,12 @@
 static void mdp4_dsi_video_do_blt(struct msm_fb_data_type *mfd, int enable)
 {
 	unsigned long flag;
-	int data;
-	int change = 0;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
 
 	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
 
@@ -629,55 +920,33 @@
 		return;
 	}
 
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (enable && dsi_pipe->ov_blt_addr == 0) {
-		dsi_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
-		dsi_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
-		dsi_pipe->blt_cnt = 0;
-		dsi_pipe->ov_cnt = 0;
-		dsi_pipe->dmap_cnt = 0;
+	spin_lock_irqsave(&vctrl->spin_lock, flag);
+	if (enable && pipe->ov_blt_addr == 0) {
+		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+		pipe->ov_cnt = 0;
+		pipe->dmap_cnt = 0;
+		vctrl->ov_koff = 0;
+		vctrl->ov_done = 0;
+		vctrl->blt_free = 0;
 		mdp4_stat.blt_dsi_video++;
-		change++;
-	} else if (enable == 0 && dsi_pipe->ov_blt_addr) {
-		dsi_pipe->ov_blt_addr = 0;
-		dsi_pipe->dma_blt_addr = 0;
-		change++;
+		vctrl->blt_change++;
+	} else if (enable == 0 && pipe->ov_blt_addr) {
+		pipe->ov_blt_addr = 0;
+		pipe->dma_blt_addr =  0;
+		vctrl->blt_free = 4;	/* 4 commits to free wb buf */
+		vctrl->blt_change++;
 	}
 
-	if (!change) {
-		spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	pr_info("%s: changed=%d enable=%d ov_blt_addr=%x\n", __func__,
+		vctrl->blt_change, enable, (int)pipe->ov_blt_addr);
+
+	if (!vctrl->blt_change) {
+		spin_unlock_irqrestore(&vctrl->spin_lock, flag);
 		return;
 	}
 
-	pr_debug("%s: enable=%d ov_blt_addr=%x\n", __func__,
-			enable, (int)dsi_pipe->ov_blt_addr);
-	blt_cfg_changed = 1;
-
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
-	/*
-	 * may need mutex here to sync with whom dsiable
-	 * timing generator
-	 */
-	data = inpdw(MDP_BASE + DSI_VIDEO_BASE);
-	data &= 0x01;
-	if (data) {	/* timing generator enabled */
-		mdp4_overlay_dsi_video_wait4event(mfd, INTR_DMA_P_DONE);
-		msleep(20);
-	}
-
-
-}
-
-int mdp4_dsi_video_overlay_blt_offset(struct msm_fb_data_type *mfd,
-					struct msmfb_overlay_blt *req)
-{
-	req->offset = 0;
-	req->width = dsi_pipe->src_width;
-	req->height = dsi_pipe->src_height;
-	req->bpp = dsi_pipe->bpp;
-
-	return sizeof(*req);
+	spin_unlock_irqrestore(&vctrl->spin_lock, flag);
 }
 
 void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd,
@@ -702,36 +971,36 @@
 	uint8 *buf;
 	unsigned int buf_offset;
 	int bpp;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
 
-	if (!mfd->panel_power_on)
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	if (!pipe || !mfd->panel_power_on)
 		return;
 
-	/* no need to power on cmd block since it's dsi video mode */
-	bpp = fbi->var.bits_per_pixel / 8;
-	buf = (uint8 *) fbi->fix.smem_start;
-	buf_offset = calc_fb_offset(mfd, fbi, bpp);
+	pr_debug("%s: cpu=%d pid=%d\n", __func__,
+			smp_processor_id(), current->pid);
+	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
+		bpp = fbi->var.bits_per_pixel / 8;
+		buf = (uint8 *) fbi->fix.smem_start;
+		buf_offset = calc_fb_offset(mfd, fbi, bpp);
 
-	mutex_lock(&mfd->dma->ov_mutex);
+		if (mfd->display_iova)
+			pipe->srcp0_addr = mfd->display_iova + buf_offset;
+		else
+			pipe->srcp0_addr = (uint32)(buf + buf_offset);
 
-	pipe = dsi_pipe;
-	if (pipe->pipe_used == 0 ||
-			pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
-		pr_err("%s: NOT baselayer\n", __func__);
-		mutex_unlock(&mfd->dma->ov_mutex);
-		return;
+		mdp4_dsi_video_pipe_queue(0, pipe);
 	}
 
-	if (mfd->display_iova)
-		pipe->srcp0_addr = mfd->display_iova + buf_offset;
-	else
-		pipe->srcp0_addr = (uint32)(buf + buf_offset);
+	mdp4_dsi_video_pipe_commit();
 
-	mdp4_overlay_rgb_setup(pipe);
-	mdp4_overlay_reg_flush(pipe, 1);
-	mdp4_mixer_stage_up(pipe);
-	mdp4_overlay_dsi_video_start();
-	mdp4_overlay_dsi_video_vsync_push(mfd, pipe);
-	mdp4_iommu_unmap(pipe);
-	mutex_unlock(&mfd->dma->ov_mutex);
+	if (pipe->ov_blt_addr)
+		mdp4_dsi_video_wait4ov(0);
+	else
+		mdp4_dsi_video_wait4dmap(0);
 }
+
diff --git a/drivers/video/msm/mdp4_overlay_dtv.c b/drivers/video/msm/mdp4_overlay_dtv.c
index b9d6037..57a07d0 100644
--- a/drivers/video/msm/mdp4_overlay_dtv.c
+++ b/drivers/video/msm/mdp4_overlay_dtv.c
@@ -33,6 +33,8 @@
 
 #define DTV_BASE	0xD0000
 
+static int dtv_enabled;
+
 /*#define DEBUG*/
 #ifdef DEBUG
 static void __mdp_outp(uint32 port, uint32 value)
@@ -51,15 +53,298 @@
 
 static int first_pixel_start_x;
 static int first_pixel_start_y;
-static int dtv_enabled;
 
-static struct mdp4_overlay_pipe *dtv_pipe;
-static DECLARE_COMPLETION(dtv_comp);
-
-void mdp4_dtv_base_swap(struct mdp4_overlay_pipe *pipe)
+void mdp4_dtv_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
 {
+#ifdef BYPASS4
 	if (hdmi_prim_display)
 		dtv_pipe = pipe;
+#endif
+}
+
+#define MAX_CONTROLLER	1
+
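+/*
+ * Per-controller vsync state for the external (DTV/HDMI) interface:
+ * completions for DMA_E and vsync waiters, a two-entry update list
+ * (vlist, toggled through update_ndx) that buffers pipe updates between
+ * commits, and a work item that reports the latest vsync timestamp to
+ * userspace as a uevent.
+ */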
+static struct vsycn_ctrl {
+	struct device *dev;
+	int inited;
+	int update_ndx;
+	int dmae_intr_cnt;
+	atomic_t suspend;
+	int dmae_wait_cnt;
+	int wait_vsync_cnt;
+	int blt_change;
+	struct mutex update_lock;
+	struct completion dmae_comp;
+	struct completion vsync_comp;
+	spinlock_t spin_lock;
+	struct mdp4_overlay_pipe *base_pipe;
+	struct vsync_update vlist[2];
+	int vsync_irq_enabled;
+	ktime_t vsync_time;
+	struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+static void vsync_irq_enable(int intr, int term)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	outp32(MDP_INTR_CLEAR,
+		INTR_DMA_E_DONE | INTR_OVERLAY1_DONE | INTR_EXTERNAL_VSYNC);
+	mdp_intr_mask |= intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_enable_irq(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
+}
+
+static void vsync_irq_disable(int intr, int term)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	outp32(MDP_INTR_CLEAR,
+		INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+	mdp_intr_mask &= ~intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_disable_irq_nosync(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
+}
+
+void mdp4_overlay_dtv_start(void)
+{
+	if (!dtv_enabled) {
+		/* enable DTV block */
+		mdp4_iommu_attach();
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		MDP_OUTP(MDP_BASE + DTV_BASE, 1);
+		mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+		dtv_enabled = 1;
+	}
+}
+
+/*
+ * mdp4_dtv_vsync_do_update:
+ * called from thread context
+ */
+void mdp4_dtv_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pp;
+	int undx;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	/* start timing generator & mmu if they are not started yet */
+	mdp4_overlay_dtv_start();
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+
+	pp = &vp->plist[pipe->pipe_ndx - 1];	/* ndx start from 1 */
+
+	pr_debug("%s: vndx=%d pipe_ndx=%d flags=%x pid=%d\n",
+		 __func__, undx, pipe->pipe_ndx, pipe->flags, current->pid);
+
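+	/*
+	 * Snapshot the caller's pipe into the current update list; the copy
+	 * is picked up (and pipe_used cleared) by mdp4_dtv_pipe_commit().
+	 */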
+	*pp = *pipe;	/* keep it */
+	vp->update_cnt++;
+	mutex_unlock(&vctrl->update_lock);
+}
+
+static void mdp4_dtv_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+
+int mdp4_dtv_pipe_commit(void)
+{
+	int i, undx;
+	int mixer = 0;
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+	int cnt = 0;
+
+	vctrl = &vsync_ctrl_db[0];
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+	pipe = vctrl->base_pipe;
+	mixer = pipe->mixer_num;
+	mdp4_overlay_iommu_unmap_freelist(mixer);
+
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return 0;
+	}
+
+	vctrl->update_ndx++;
+	vctrl->update_ndx &= 0x01;
+	vp->update_cnt = 0;	/* reset */
+	mutex_unlock(&vctrl->update_lock);
+
+	pipe = vp->plist;
+	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+		if (pipe->pipe_used) {
+			cnt++;
+			mdp4_overlay_vsync_commit(pipe);
+			pipe->pipe_used = 0; /* clear */
+		}
+	}
+	mdp4_mixer_stage_commit(mixer);
+
+	pipe = vctrl->base_pipe;
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
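+	/*
+	 * With a writeback (BLT) buffer the frame goes through the overlay1
+	 * engine first; otherwise the second-phase update is deferred to
+	 * the DMA_E done interrupt.
+	 */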
+	if (pipe->ov_blt_addr) {
+		mdp4_dtv_blt_ov_update(pipe);
+		pipe->blt_ov_done++;
+		vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
+		mb();
+		pipe->blt_ov_koff++;
+		/* kickoff overlay1 engine */
+		mdp4_stat.kickoff_ov1++;
+		outpdw(MDP_BASE + 0x0008, 0);
+	} else if (vctrl->dmae_intr_cnt == 0) {
+		/* schedule second phase update at dmae */
+		vctrl->dmae_intr_cnt++;
+		vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	return cnt;
+}
+
+void mdp4_dtv_vsync_ctrl(int cndx, int enable)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (vctrl->vsync_irq_enabled == enable)
+		return;
+
+	pr_debug("%s: vsync enable=%d\n", __func__, enable);
+
+	vctrl->vsync_irq_enabled = enable;
+
+	if (enable)
+		vsync_irq_enable(INTR_EXTERNAL_VSYNC, MDP_EXTER_VSYNC_TERM);
+	else
+		vsync_irq_disable(INTR_EXTERNAL_VSYNC, MDP_EXTER_VSYNC_TERM);
+}
+
+void mdp4_dtv_wait4vsync(int cndx, long long *vtime)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+
+	if (vctrl->wait_vsync_cnt == 0)
+		INIT_COMPLETION(vctrl->vsync_comp);
+	vctrl->wait_vsync_cnt++;
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	wait_for_completion(&vctrl->vsync_comp);
+	mdp4_stat.wait4vsync1++;
+
+	*vtime = ktime_to_ns(vctrl->vsync_time);
+}
+
+static void mdp4_dtv_wait4dmae(int cndx)
+{
+	unsigned long flags;
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (vctrl->dmae_wait_cnt == 0) {
+		INIT_COMPLETION(vctrl->dmae_comp);
+		if (vctrl->dmae_intr_cnt == 0) {
+			vctrl->dmae_intr_cnt++;
+			vsync_irq_enable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
+		}
+	}
+	vctrl->dmae_wait_cnt++;
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	wait_for_completion(&vctrl->dmae_comp);
+	pr_info("%s: pid=%d after wait\n", __func__, current->pid);
+}
+
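+/*
+ * Report the most recent vsync timestamp to userspace as a KOBJ_CHANGE
+ * uevent ("VSYNC=<ns>") on the framebuffer device; scheduled from the
+ * external vsync isr.
+ */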
+static void send_vsync_work(struct work_struct *work)
+{
+	struct vsycn_ctrl *vctrl =
+		container_of(work, typeof(*vctrl), vsync_work);
+	char buf[64];
+	char *envp[2];
+
+	snprintf(buf, sizeof(buf), "VSYNC=%llu",
+			ktime_to_ns(vctrl->vsync_time));
+	envp[0] = buf;
+	envp[1] = NULL;
+	kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+void mdp4_dtv_vsync_init(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	pr_info("%s: ndx=%d\n", __func__, cndx);
+
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->inited)
+		return;
+
+	vctrl->inited = 1;
+	vctrl->update_ndx = 0;
+	mutex_init(&vctrl->update_lock);
+	init_completion(&vctrl->vsync_comp);
+	init_completion(&vctrl->dmae_comp);
+	atomic_set(&vctrl->suspend, 0);
+	spin_lock_init(&vctrl->spin_lock);
+	INIT_WORK(&vctrl->vsync_work, send_vsync_work);
 }
 
 static int mdp4_dtv_start(struct msm_fb_data_type *mfd)
@@ -103,9 +388,6 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	if (dtv_pipe == NULL)
-		return -EINVAL;
-
 	fbi = mfd->fbi;
 	var = &fbi->var;
 
@@ -201,26 +483,22 @@
 	/* Test pattern 8 x 8 pixel */
 	/* MDP_OUTP(MDP_BASE + DTV_BASE + 0x4C, 0x80000808); */
 
-	mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-	/* MDP cmd block disable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	/* enable DTV block */
+	MDP_OUTP(MDP_BASE + DTV_BASE, 1);
 
 	return 0;
 }
 
 static int mdp4_dtv_stop(struct msm_fb_data_type *mfd)
 {
-	if (dtv_pipe == NULL)
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->base_pipe == NULL)
 		return -EINVAL;
 
-	/* MDP cmd block enable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-	msleep(20);
 	MDP_OUTP(MDP_BASE + DTV_BASE, 0);
-	dtv_enabled = 0;
-	/* MDP cmd block disable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-	mdp_pipe_ctrl(MDP_OVERLAY1_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
 	return 0;
 }
@@ -229,6 +507,10 @@
 {
 	struct msm_fb_data_type *mfd;
 	int ret = 0;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+
+	vctrl = &vsync_ctrl_db[cndx];
 
 	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
@@ -238,16 +520,30 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
+	vctrl->dev = mfd->fbi->dev;
+
 	mdp_footswitch_ctrl(TRUE);
+	/* Mdp clock enable */
+	mdp_clk_ctrl(1);
+
 	mdp4_overlay_panel_mode(MDP4_MIXER1, MDP4_PANEL_DTV);
-	if (dtv_pipe != NULL)
-		ret = mdp4_dtv_start(mfd);
+
+	/* Allocate dtv_pipe at dtv_on */
+	if (vctrl->base_pipe == NULL) {
+		if (mdp4_overlay_dtv_set(mfd, NULL)) {
+			pr_warn("%s: dtv_pipe is NULL, dtv_set failed\n",
+				__func__);
+			return -EINVAL;
+		}
+	}
 
 	ret = panel_next_on(pdev);
 	if (ret != 0)
-		dev_warn(&pdev->dev, "mdp4_overlay_dtv: panel_next_on failed");
+		pr_warn("%s: panel_next_on failed\n", __func__);
 
-	dev_info(&pdev->dev, "mdp4_overlay_dtv: on");
+	atomic_set(&vctrl->suspend, 0);
+
+	pr_info("%s:\n", __func__);
 
 	return ret;
 }
@@ -256,47 +552,112 @@
 {
 	struct msm_fb_data_type *mfd;
 	int ret = 0;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
 	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
-	if (dtv_pipe != NULL) {
+	vctrl = &vsync_ctrl_db[cndx];
+
+	atomic_set(&vctrl->suspend, 1);
+
+	while (vctrl->wait_vsync_cnt)
+		msleep(20);	/* >= 17 ms */
+
+	pipe = vctrl->base_pipe;
+	if (pipe != NULL) {
 		mdp4_dtv_stop(mfd);
 		if (hdmi_prim_display && mfd->ref_cnt == 0) {
 			/* adb stop */
-			if (dtv_pipe->pipe_type == OVERLAY_TYPE_BF)
-				mdp4_overlay_borderfill_stage_down(dtv_pipe);
+			if (pipe->pipe_type == OVERLAY_TYPE_BF)
+				mdp4_overlay_borderfill_stage_down(pipe);
 
-			/* dtv_pipe == rgb1 */
-			mdp4_overlay_unset_mixer(dtv_pipe->mixer_num);
-			dtv_pipe = NULL;
+			/* pipe == rgb2 */
+			mdp4_overlay_unset_mixer(pipe->mixer_num);
+			vctrl->base_pipe = NULL;
 		} else {
-			mdp4_mixer_stage_down(dtv_pipe);
-			mdp4_overlay_pipe_free(dtv_pipe);
-			mdp4_iommu_unmap(dtv_pipe);
-			dtv_pipe = NULL;
+			mdp4_mixer_stage_down(pipe);
+			mdp4_overlay_pipe_free(pipe);
+			vctrl->base_pipe = NULL;
 		}
 	}
+
 	mdp4_overlay_panel_mode_unset(MDP4_MIXER1, MDP4_PANEL_DTV);
 
 	ret = panel_next_off(pdev);
 	mdp_footswitch_ctrl(FALSE);
 
-	dev_info(&pdev->dev, "mdp4_overlay_dtv: off");
+	/* Mdp clock disable */
+	mdp_clk_ctrl(0);
+
+	pr_info("%s:\n", __func__);
 	return ret;
 }
 
+static void mdp4_dtv_blt_ov_update(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 off, addr;
+	int bpp;
+	char *overlay_base;
+
+	if (pipe->ov_blt_addr == 0)
+		return;
+
+#ifdef BLT_RGB565
+	bpp = 2; /* overlay output is RGB565 */
+#else
+	bpp = 3; /* overlay output is RGB888 */
+#endif
+	off = 0;
+	if (pipe->blt_ov_done & 0x01)
+		off = pipe->src_height * pipe->src_width * bpp;
+	addr = pipe->ov_blt_addr + off;
+
+	/* overlay 1 */
+	overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE; /* 0x18000 */
+	outpdw(overlay_base + 0x000c, addr);
+	outpdw(overlay_base + 0x001c, addr);
+}
+
+static void mdp4_dtv_blt_dmae_update(struct mdp4_overlay_pipe *pipe)
+{
+	uint32 off, addr;
+	int bpp;
+
+	if (pipe->ov_blt_addr == 0)
+		return;
+
+#ifdef BLT_RGB565
+	bpp = 2; /* overlay output is RGB565 */
+#else
+	bpp = 3; /* overlay output is RGB888 */
+#endif
+	off = 0;
+	if (pipe->blt_dmap_done & 0x01)
+		off = pipe->src_height * pipe->src_width * bpp;
+	addr = pipe->dma_blt_addr + off;
+
+	/* dmae */
+	MDP_OUTP(MDP_BASE + 0xb0008, addr);
+}
+
+void mdp4_overlay_dtv_set_perf(struct msm_fb_data_type *mfd)
+{
+	/* change mdp clk while mdp is idle */
+	mdp4_set_perf_level();
+}
+
 static void mdp4_overlay_dtv_alloc_pipe(struct msm_fb_data_type *mfd,
-		int32 ptype)
+		int32 ptype, struct vsycn_ctrl *vctrl)
 {
 	int ret = 0;
 	struct fb_info *fbi = mfd->fbi;
 	struct mdp4_overlay_pipe *pipe;
 
-	if (dtv_pipe != NULL)
+	if (vctrl->base_pipe != NULL)
 		return;
 
-	pr_debug("%s: ptype=%d\n", __func__, ptype);
-
 	pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER1);
 	if (pipe == NULL) {
 		pr_err("%s: pipe_alloc failed\n", __func__);
@@ -307,7 +668,6 @@
 	pipe->mixer_num = MDP4_MIXER1;
 
 	if (ptype == OVERLAY_TYPE_BF) {
-		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 		/* LSP_BORDER_COLOR */
 		MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5004,
 			((0x0 & 0xFFF) << 16) |	/* 12-bit B */
@@ -315,7 +675,7 @@
 		/* MSP_BORDER_COLOR */
 		MDP_OUTP(MDP_BASE + MDP4_OVERLAYPROC1_BASE + 0x5008,
 			(0x0 & 0xFFF));		/* 12-bit R */
-		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+		pipe->src_format = MDP_ARGB_8888;
 	} else {
 		switch (mfd->ibuf.bpp) {
 		case 2:
@@ -357,28 +717,34 @@
 	mdp4_overlay_reg_flush(pipe, 1);
 	mdp4_mixer_stage_up(pipe);
 
-	dtv_pipe = pipe; /* keep it */
+	vctrl->base_pipe = pipe; /* keep it */
 }
 
 int mdp4_overlay_dtv_set(struct msm_fb_data_type *mfd,
 			struct mdp4_overlay_pipe *pipe)
 {
-	if (dtv_pipe != NULL)
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->base_pipe != NULL)
 		return 0;
 
 	if (pipe != NULL && pipe->mixer_stage == MDP4_MIXER_STAGE_BASE &&
 			pipe->pipe_type == OVERLAY_TYPE_RGB)
-		dtv_pipe = pipe; /* keep it */
+		vctrl->base_pipe = pipe; /* keep it */
 	else if (!hdmi_prim_display && mdp4_overlay_borderfill_supported())
-		mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_BF);
+		mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_BF, vctrl);
 	else
-		mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_RGB);
-	if (dtv_pipe == NULL)
+		mdp4_overlay_dtv_alloc_pipe(mfd, OVERLAY_TYPE_RGB, vctrl);
+
+	if (vctrl->base_pipe == NULL)
 		return -ENODEV;
 
 	mdp4_init_writeback_buf(mfd, MDP4_MIXER1);
-	dtv_pipe->ov_blt_addr = 0;
-	dtv_pipe->dma_blt_addr = 0;
+	vctrl->base_pipe->ov_blt_addr = 0;
+	vctrl->base_pipe->dma_blt_addr = 0;
 
 	return mdp4_dtv_start(mfd);
 }
@@ -387,206 +753,112 @@
 			struct mdp4_overlay_pipe *pipe)
 {
 	int result = 0;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
 
-	if (dtv_pipe == NULL)
-		return result;
-
-	pipe->flags &= ~MDP_OV_PLAY_NOWAIT;
-	mdp4_overlay_reg_flush(pipe, 0);
-	mdp4_overlay_dtv_ov_done_push(mfd, pipe);
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->base_pipe != NULL)
+		return 0;
 
 	if (pipe->mixer_stage == MDP4_MIXER_STAGE_BASE &&
 			pipe->pipe_type == OVERLAY_TYPE_RGB) {
 		result = mdp4_dtv_stop(mfd);
-		dtv_pipe = NULL;
+		vctrl->base_pipe = NULL;
 	}
 	return result;
 }
 
-static void mdp4_dtv_blt_ov_update(struct mdp4_overlay_pipe *pipe)
+/* TODO: dtv writeback needs to be added later */
+
+void mdp4_external_vsync_dtv(void)
 {
-	uint32 off, addr;
-	int bpp;
-	char *overlay_base;
+	int cndx;
+	struct vsycn_ctrl *vctrl;
 
-	if (pipe->ov_blt_addr == 0)
-		return;
-#ifdef BLT_RGB565
-	bpp = 2; /* overlay ouput is RGB565 */
-#else
-	bpp = 3; /* overlay ouput is RGB888 */
-#endif
-	off = (pipe->ov_cnt & 0x01) ?
-		pipe->src_height * pipe->src_width * bpp : 0;
+	cndx = 0;
+	vctrl = &vsync_ctrl_db[cndx];
+	pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+	vctrl->vsync_time = ktime_get();
+	schedule_work(&vctrl->vsync_work);
 
-	addr = pipe->ov_blt_addr + off;
-	pr_debug("%s overlay addr 0x%x\n", __func__, addr);
-	/* overlay 1 */
-	overlay_base = MDP_BASE + MDP4_OVERLAYPROC1_BASE;/* 0x18000 */
-	outpdw(overlay_base + 0x000c, addr);
-	outpdw(overlay_base + 0x001c, addr);
+	spin_lock(&vctrl->spin_lock);
+	if (vctrl->wait_vsync_cnt) {
+		complete_all(&vctrl->vsync_comp);
+		vctrl->wait_vsync_cnt = 0;
+	}
+	spin_unlock(&vctrl->spin_lock);
 }
 
-static inline void mdp4_dtv_blt_dmae_update(struct mdp4_overlay_pipe *pipe)
+/*
+ * mdp4_dmae_done_dtv: called from isr
+ */
+void mdp4_dmae_done_dtv(void)
 {
-	uint32 off, addr;
-	int bpp;
+	int cndx;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
-	if (pipe->ov_blt_addr == 0)
-		return;
-
-#ifdef BLT_RGB565
-	bpp = 2; /* overlay ouput is RGB565 */
-#else
-	bpp = 3; /* overlay ouput is RGB888 */
-#endif
-	off =  (pipe->dmae_cnt & 0x01) ?
-		pipe->src_height * pipe->src_width * bpp : 0;
-	addr = pipe->dma_blt_addr + off;
-	MDP_OUTP(MDP_BASE + 0xb0008, addr);
-}
-
-static inline void mdp4_overlay_dtv_ov_kick_start(void)
-{
-	outpdw(MDP_BASE + 0x0008, 0);
-}
-
-static void mdp4_overlay_dtv_ov_start(struct msm_fb_data_type *mfd)
-{
-	unsigned long flag;
-
-	/* enable irq */
-	if (mfd->ov_start)
-		return;
-
-	if (!dtv_pipe) {
-		pr_debug("%s: no mixer1 base layer pipe allocated!\n",
-			 __func__);
+	cndx = 0;
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
 		return;
 	}
 
-	if (dtv_pipe->ov_blt_addr) {
-		mdp4_dtv_blt_ov_update(dtv_pipe);
-		dtv_pipe->ov_cnt++;
-		mdp4_overlay_dtv_ov_kick_start();
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	spin_lock(&vctrl->spin_lock);
+	if (vctrl->blt_change) {
+		if (pipe->ov_blt_addr) {
+			mdp4_overlayproc_cfg(pipe);
+			mdp4_overlay_dmae_xy(pipe);
+			mdp4_dtv_blt_ov_update(pipe);
+			pipe->blt_ov_done++;
+
+			/* Prefill one frame */
+			vsync_irq_enable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
+			/* kickoff overlay1 engine */
+			mdp4_stat.kickoff_ov1++;
+			outpdw(MDP_BASE + 0x0008, 0);
+		}
+		vctrl->blt_change = 0;
 	}
 
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	mdp_enable_irq(MDP_OVERLAY1_TERM);
-	INIT_COMPLETION(dtv_pipe->comp);
-	mfd->dma->waiting = TRUE;
-	outp32(MDP_INTR_CLEAR, INTR_OVERLAY1_DONE);
-	mdp_intr_mask |= INTR_OVERLAY1_DONE;
-	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-	mfd->ov_start = true;
-}
-
-static void mdp4_overlay_dtv_wait4dmae(struct msm_fb_data_type *mfd)
-{
-	unsigned long flag;
-
-	if (!dtv_pipe) {
-		pr_debug("%s: no mixer1 base layer pipe allocated!\n",
-			 __func__);
-		return;
+	vctrl->dmae_intr_cnt--;
+	if (vctrl->dmae_wait_cnt) {
+		complete_all(&vctrl->dmae_comp);
+		vctrl->dmae_wait_cnt = 0; /* reset */
+	} else {
+		mdp4_overlay_dma_commit(MDP4_MIXER1);
 	}
-	/* enable irq */
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	mdp_enable_irq(MDP_DMA_E_TERM);
-	INIT_COMPLETION(dtv_pipe->comp);
-	mfd->dma->waiting = TRUE;
-	outp32(MDP_INTR_CLEAR, INTR_DMA_E_DONE);
-	mdp_intr_mask |= INTR_DMA_E_DONE;
-	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-	wait_for_completion_killable(&dtv_pipe->comp);
-	mdp_disable_irq(MDP_DMA_E_TERM);
-}
-
-static void mdp4_overlay_dtv_wait4_ov_done(struct msm_fb_data_type *mfd,
-	struct mdp4_overlay_pipe *pipe)
-{
-	u32 data = inpdw(MDP_BASE + DTV_BASE);
-
-	if (mfd->ov_start)
-		mfd->ov_start = false;
-	else
-		return;
-	if (!(data & 0x1) || (pipe == NULL))
-		return;
-	if (!dtv_pipe) {
-		pr_debug("%s: no mixer1 base layer pipe allocated!\n",
-			 __func__);
-		return;
-	}
-
-	wait_for_completion_timeout(&dtv_pipe->comp,
-			msecs_to_jiffies(VSYNC_PERIOD * 3));
-	mdp_disable_irq(MDP_OVERLAY1_TERM);
-
-	if (dtv_pipe->ov_blt_addr)
-		mdp4_overlay_dtv_wait4dmae(mfd);
-}
-
-void mdp4_overlay_dtv_start(void)
-{
-	if (!dtv_enabled) {
-		mdp4_iommu_attach();
-		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-		/* enable DTV block */
-		MDP_OUTP(MDP_BASE + DTV_BASE, 1);
-		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-		dtv_enabled = 1;
-	}
-}
-
-void mdp4_overlay_dtv_ov_done_push(struct msm_fb_data_type *mfd,
-			struct mdp4_overlay_pipe *pipe)
-{
-	mdp4_overlay_dtv_ov_start(mfd);
-	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
-		return;
-
-	mdp4_overlay_dtv_wait4_ov_done(mfd, pipe);
-
-	/* change mdp clk while mdp is idle` */
-	mdp4_set_perf_level();
-}
-
-void mdp4_overlay_dtv_wait_for_ov(struct msm_fb_data_type *mfd,
-	struct mdp4_overlay_pipe *pipe)
-{
-	mdp4_overlay_dtv_wait4_ov_done(mfd, pipe);
-	mdp4_set_perf_level();
-}
-
-void mdp4_dma_e_done_dtv()
-{
-	if (!dtv_pipe)
-		return;
-
-	complete(&dtv_pipe->comp);
-}
-
-void mdp4_external_vsync_dtv()
-{
-
-	complete_all(&dtv_comp);
+	vsync_irq_disable(INTR_DMA_E_DONE, MDP_DMA_E_TERM);
+	spin_unlock(&vctrl->spin_lock);
 }
 
 /*
  * mdp4_overlay1_done_dtv: called from isr
  */
-void mdp4_overlay1_done_dtv()
+void mdp4_overlay1_done_dtv(void)
 {
-	if (!dtv_pipe)
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	int cndx = 0;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	spin_lock(&vctrl->spin_lock);
+	if (pipe->ov_blt_addr == 0) {
+		spin_unlock(&vctrl->spin_lock);
 		return;
-	if (dtv_pipe->ov_blt_addr) {
-		mdp4_dtv_blt_dmae_update(dtv_pipe);
-		dtv_pipe->dmae_cnt++;
 	}
-	complete_all(&dtv_pipe->comp);
+
+	mdp4_dtv_blt_dmae_update(pipe);
+	pipe->blt_dmap_done++;
+	vsync_irq_disable(INTR_OVERLAY1_DONE, MDP_OVERLAY1_TERM);
+	spin_unlock(&vctrl->spin_lock);
 }
 
 void mdp4_dtv_set_black_screen(void)
@@ -595,16 +867,17 @@
 	/*Black color*/
 	uint32 color = 0x00000000;
 	uint32 temp_src_format;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
 
-	if (!dtv_pipe || !hdmi_prim_display) {
-		pr_err("dtv_pipe/hdmi as primary are not"
-			   " configured yet\n");
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->base_pipe == NULL || !hdmi_prim_display) {
+		pr_err("dtv_pipe is not configured yet\n");
 		return;
 	}
 	rgb_base = MDP_BASE + MDP4_RGB_BASE;
-	rgb_base += (MDP4_RGB_OFF * dtv_pipe->pipe_num);
+	rgb_base += (MDP4_RGB_OFF * vctrl->base_pipe->pipe_num);
 
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 	/*
 	* RGB Constant Color
 	*/
@@ -614,73 +887,71 @@
 	*/
 	temp_src_format = inpdw(rgb_base + 0x0050);
 	MDP_OUTP(rgb_base + 0x0050, temp_src_format | BIT(22));
-	mdp4_overlay_reg_flush(dtv_pipe, 1);
-	mdp4_mixer_stage_up(dtv_pipe);
+	mdp4_overlay_reg_flush(vctrl->base_pipe, 1);
+	mdp4_mixer_stage_up(vctrl->base_pipe);
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 }
 
-void mdp4_overlay_dtv_wait4vsync(void)
-{
-	unsigned long flag;
-
-	if (!dtv_enabled)
-		return;
-
-	/* enable irq */
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	mdp_enable_irq(MDP_DMA_E_TERM);
-	INIT_COMPLETION(dtv_comp);
-	outp32(MDP_INTR_CLEAR, INTR_EXTERNAL_VSYNC);
-	mdp_intr_mask |= INTR_EXTERNAL_VSYNC;
-	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-	wait_for_completion_killable(&dtv_comp);
-	mdp_disable_irq(MDP_DMA_E_TERM);
-}
-
 static void mdp4_dtv_do_blt(struct msm_fb_data_type *mfd, int enable)
 {
 	unsigned long flag;
-	int change = 0;
+	int data;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER1);
 
 	if (!mfd->ov1_wb_buf->write_addr) {
-		pr_debug("%s: no writeback buf assigned\n", __func__);
+		pr_info("%s: ctrl=%d blt_base NOT assigned\n", __func__, cndx);
 		return;
 	}
 
-	if (!dtv_pipe) {
-		pr_debug("%s: no mixer1 base layer pipe allocated!\n",
-			 __func__);
+	spin_lock_irqsave(&vctrl->spin_lock, flag);
+	if (enable && pipe->ov_blt_addr == 0) {
+		pipe->ov_blt_addr = mfd->ov1_wb_buf->write_addr;
+		pipe->dma_blt_addr = mfd->ov1_wb_buf->read_addr;
+		pipe->blt_cnt = 0;
+		pipe->ov_cnt = 0;
+		pipe->blt_dmap_done = 0;
+		pipe->blt_ov_koff = 0;
+		pipe->blt_ov_done = 0;
+		mdp4_stat.blt_dtv++;
+		vctrl->blt_change++;
+	} else if (enable == 0 && pipe->ov_blt_addr) {
+		pipe->ov_blt_addr = 0;
+		pipe->dma_blt_addr = 0;
+		vctrl->blt_change++;
+	}
+
+	pr_info("%s: enable=%d change=%d blt_addr=%x\n", __func__,
+		enable, vctrl->blt_change, (int)pipe->ov_blt_addr);
+
+	if (!vctrl->blt_change) {
+		spin_unlock_irqrestore(&vctrl->spin_lock, flag);
 		return;
 	}
 
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (enable && dtv_pipe->ov_blt_addr == 0) {
-		dtv_pipe->ov_blt_addr = mfd->ov1_wb_buf->write_addr;
-		dtv_pipe->dma_blt_addr = mfd->ov1_wb_buf->read_addr;
-		change++;
-		dtv_pipe->ov_cnt = 0;
-		dtv_pipe->dmae_cnt = 0;
-	} else if (enable == 0 && dtv_pipe->ov_blt_addr) {
-		dtv_pipe->ov_blt_addr = 0;
-		dtv_pipe->dma_blt_addr = 0;
-		change++;
-	}
-	pr_debug("%s: ov_blt_addr=%x\n", __func__, (int)dtv_pipe->ov_blt_addr);
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	atomic_set(&vctrl->suspend, 1);
+	spin_unlock_irqrestore(&vctrl->spin_lock, flag);
 
-	if (!change)
-		return;
+	data = inpdw(MDP_BASE + DTV_BASE);
+	data &= 0x01;
+	if (data)       /* timing generator enabled */
+		mdp4_dtv_wait4dmae(0);
 
-	if (dtv_enabled) {
-		mdp4_overlay_dtv_wait4dmae(mfd);
-		MDP_OUTP(MDP_BASE + DTV_BASE, 0);	/* stop dtv */
+	if (pipe->ov_blt_addr == 0) {
+		MDP_OUTP(MDP_BASE + DTV_BASE, 0);       /* stop dtv */
 		msleep(20);
+		mdp4_overlayproc_cfg(pipe);
+		mdp4_overlay_dmae_xy(pipe);
+		MDP_OUTP(MDP_BASE + DTV_BASE, 1);       /* start dtv */
 	}
 
-	mdp4_overlay_dmae_xy(dtv_pipe);
-	mdp4_overlayproc_cfg(dtv_pipe);
-	MDP_OUTP(MDP_BASE + DTV_BASE, 1);	/* start dtv */
+	atomic_set(&vctrl->suspend, 0);
 }
 
 void mdp4_dtv_overlay_blt_start(struct msm_fb_data_type *mfd)
@@ -695,20 +966,23 @@
 
 void mdp4_dtv_overlay(struct msm_fb_data_type *mfd)
 {
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
+
 	if (!mfd->panel_power_on)
 		return;
 
-	mutex_lock(&mfd->dma->ov_mutex);
-	if (dtv_pipe == NULL) {
-		if (mdp4_overlay_dtv_set(mfd, NULL)) {
-			pr_warn("%s: dtv_pipe == NULL\n", __func__);
-			mutex_unlock(&mfd->dma->ov_mutex);
-			return;
-		}
-	}
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->base_pipe == NULL)
+		mdp4_overlay_dtv_set(mfd, NULL);
 
-	pipe = dtv_pipe;
+	pipe = vctrl->base_pipe;
+
+	if (pipe == NULL) {
+		pr_warn("%s: dtv_pipe == NULL\n", __func__);
+		return;
+	}
 
 	if (hdmi_prim_display && (pipe->pipe_used == 0 ||
 			pipe->mixer_stage != MDP4_MIXER_STAGE_BASE)) {
@@ -717,14 +991,9 @@
 		return;
 	}
 
-	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
-		pipe->srcp0_addr = (uint32) mfd->ibuf.buf;
-		mdp4_overlay_rgb_setup(pipe);
+	if (pipe->pipe_type == OVERLAY_TYPE_RGB)  {
+		pipe->srcp0_addr = (uint32)mfd->ibuf.buf;
+		mdp4_dtv_pipe_queue(0, pipe);
 	}
-	mdp4_overlay_reg_flush(pipe, 1);
-	mdp4_mixer_stage_up(pipe);
-	mdp4_overlay_dtv_start();
-	mdp4_overlay_dtv_ov_done_push(mfd, pipe);
-	mdp4_iommu_unmap(pipe);
-	mutex_unlock(&mfd->dma->ov_mutex);
+	mdp4_dtv_pipe_commit();
 }
diff --git a/drivers/video/msm/mdp4_overlay_lcdc.c b/drivers/video/msm/mdp4_overlay_lcdc.c
index bcc4ea6..2da2052 100644
--- a/drivers/video/msm/mdp4_overlay_lcdc.c
+++ b/drivers/video/msm/mdp4_overlay_lcdc.c
@@ -41,17 +41,372 @@
 
 int first_pixel_start_x;
 int first_pixel_start_y;
+
 static int lcdc_enabled;
 
-static struct mdp4_overlay_pipe *lcdc_pipe;
-static struct completion lcdc_comp;
+#define MAX_CONTROLLER	1
 
-void mdp4_lcdc_base_swap(struct mdp4_overlay_pipe *pipe)
+static struct vsycn_ctrl {
+	struct device *dev;
+	int inited;
+	int update_ndx;
+	int ov_koff;
+	int ov_done;
+	atomic_t suspend;
+	int wait_vsync_cnt;
+	int blt_change;
+	int blt_free;
+	int fake_vsync;
+	struct mutex update_lock;
+	struct completion ov_comp;
+	struct completion dmap_comp;
+	struct completion vsync_comp;
+	spinlock_t spin_lock;
+	struct msm_fb_data_type *mfd;
+	struct mdp4_overlay_pipe *base_pipe;
+	struct vsync_update vlist[2];
+	int vsync_irq_enabled;
+	ktime_t vsync_time;
+	struct work_struct vsync_work;
+} vsync_ctrl_db[MAX_CONTROLLER];
+
+
+/*******************************************************
+to do:
+1) move vsync_irq_enable/vsync_irq_disable to mdp.c to be shared
+*******************************************************/
+static void vsync_irq_enable(int intr, int term)
 {
-	lcdc_pipe = pipe;
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	outp32(MDP_INTR_CLEAR,
+		INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+	mdp_intr_mask |= intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_enable_irq(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
 }
 
-int mdp_lcdc_on(struct platform_device *pdev)
+static void vsync_irq_disable(int intr, int term)
+{
+	unsigned long flag;
+
+	spin_lock_irqsave(&mdp_spin_lock, flag);
+	outp32(MDP_INTR_CLEAR,
+		INTR_DMA_P_DONE | INTR_OVERLAY0_DONE | INTR_PRIMARY_VSYNC);
+	mdp_intr_mask &= ~intr;
+	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
+	mdp_disable_irq_nosync(term);
+	spin_unlock_irqrestore(&mdp_spin_lock, flag);
+	pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
+}
+
+static void mdp4_overlay_lcdc_start(void)
+{
+	if (!lcdc_enabled) {
+		/* enable LCDC block */
+		mdp4_iommu_attach();
+		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
+		lcdc_enabled = 1;
+	}
+}
+
+/*
+ * mdp4_lcdc_pipe_queue:
+ * called from thread context
+ */
+void mdp4_lcdc_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pp;
+	int undx;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	/* start timing generator & mmu if they are not started yet */
+	mdp4_overlay_lcdc_start();
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+
+	pp = &vp->plist[pipe->pipe_ndx - 1];	/* ndx start from 1 */
+
+	pr_debug("%s: vndx=%d pipe_ndx=%d pid=%d\n", __func__,
+			undx, pipe->pipe_ndx, current->pid);
+
+	*pp = *pipe;	/* keep it */
+	vp->update_cnt++;
+	mutex_unlock(&vctrl->update_lock);
+	mdp4_stat.overlay_play[pipe->mixer_num]++;
+}
+
+static void mdp4_lcdc_blt_ov_update(struct mdp4_overlay_pipe *pipe);
+static void mdp4_lcdc_wait4dmap(int cndx);
+static void mdp4_lcdc_wait4ov(int cndx);
+
+int mdp4_lcdc_pipe_commit(void)
+{
+	int i, undx;
+	int mixer = 0;
+	struct vsycn_ctrl *vctrl;
+	struct vsync_update *vp;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+	int cnt = 0;
+
+	vctrl = &vsync_ctrl_db[0];
+
+	mutex_lock(&vctrl->update_lock);
+	undx =  vctrl->update_ndx;
+	vp = &vctrl->vlist[undx];
+	pipe = vctrl->base_pipe;
+	mixer = pipe->mixer_num;
+
+	if (vp->update_cnt == 0) {
+		mutex_unlock(&vctrl->update_lock);
+		return 0;
+	}
+
+	vctrl->update_ndx++;
+	vctrl->update_ndx &= 0x01;
+	vp->update_cnt = 0;     /* reset */
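+	/*
+	 * After BLT is disabled the writeback buffer is kept for a few more
+	 * commits (blt_free counts down) before it is returned to the pool.
+	 */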
+	if (vctrl->blt_free) {
+		vctrl->blt_free--;
+		if (vctrl->blt_free == 0)
+			mdp4_free_writeback_buf(vctrl->mfd, mixer);
+	}
+	mutex_unlock(&vctrl->update_lock);
+
+	/* free previous committed iommu back to pool */
+	mdp4_overlay_iommu_unmap_freelist(mixer);
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
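+	/*
+	 * In BLT mode every overlay0 kickoff must be matched by an
+	 * overlay0-done interrupt; if the previous kickoff is still
+	 * outstanding the new frame is dropped instead of being queued.
+	 */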
+	if (vctrl->ov_koff != vctrl->ov_done) {
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+		pr_err("%s: Error, frame dropped %d %d\n", __func__,
+			vctrl->ov_koff, vctrl->ov_done);
+		return 0;
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	if (vctrl->blt_change) {
+		pipe = vctrl->base_pipe;
+		spin_lock_irqsave(&vctrl->spin_lock, flags);
+		INIT_COMPLETION(vctrl->dmap_comp);
+		INIT_COMPLETION(vctrl->ov_comp);
+		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+		mdp4_lcdc_wait4dmap(0);
+		if (pipe->ov_blt_addr)
+			mdp4_lcdc_wait4ov(0);
+	}
+
+	pipe = vp->plist;
+	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
+		if (pipe->pipe_used) {
+			cnt++;
+			mdp4_overlay_vsync_commit(pipe);
+			/* free previous iommu to freelist
+			 * which will be freed at next
+			 * pipe_commit
+			 */
+			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
+			pipe->pipe_used = 0; /* clear */
+		}
+	}
+
+	mdp4_mixer_stage_commit(mixer);
+
+	pipe = vctrl->base_pipe;
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+	if (pipe->ov_blt_addr) {
+		mdp4_lcdc_blt_ov_update(pipe);
+		pipe->ov_cnt++;
+		INIT_COMPLETION(vctrl->ov_comp);
+		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+		mb();
+		vctrl->ov_koff++;
+		/* kickoff overlay engine */
+		mdp4_stat.kickoff_ov0++;
+		outpdw(MDP_BASE + 0x0004, 0);
+	} else {
+		/* schedule second phase update  at dmap */
+		INIT_COMPLETION(vctrl->dmap_comp);
+		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+	}
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	mdp4_stat.overlay_commit[pipe->mixer_num]++;
+
+	return cnt;
+}
+
+void mdp4_lcdc_vsync_ctrl(int cndx, int enable)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
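+	/*
+	 * fake_vsync is set while the panel is off; report the last known
+	 * vsync timestamp once so a vsync uevent is still delivered while
+	 * the timing generator is down.
+	 */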
+	if (vctrl->fake_vsync) {
+		vctrl->fake_vsync = 0;
+		schedule_work(&vctrl->vsync_work);
+	}
+
+	if (vctrl->vsync_irq_enabled == enable)
+		return;
+
+	pr_debug("%s: vsync enable=%d\n", __func__, enable);
+
+	vctrl->vsync_irq_enabled = enable;
+
+	if (enable)
+		vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
+	else
+		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
+}
+
+void mdp4_lcdc_wait4vsync(int cndx, long long *vtime)
+{
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+	unsigned long flags;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	if (atomic_read(&vctrl->suspend) > 0) {
+		*vtime = -1;
+		return;
+	}
+
+	/* start timing generator & mmu if they are not started yet */
+	mdp4_overlay_lcdc_start();
+
+	spin_lock_irqsave(&vctrl->spin_lock, flags);
+
+	if (vctrl->wait_vsync_cnt == 0)
+		INIT_COMPLETION(vctrl->vsync_comp);
+	vctrl->wait_vsync_cnt++;
+	spin_unlock_irqrestore(&vctrl->spin_lock, flags);
+
+	wait_for_completion(&vctrl->vsync_comp);
+	mdp4_stat.wait4vsync0++;
+
+	*vtime = ktime_to_ns(vctrl->vsync_time);
+}
+
+static void mdp4_lcdc_wait4dmap(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->dmap_comp);
+}
+
+static void mdp4_lcdc_wait4ov(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+
+	if (atomic_read(&vctrl->suspend) > 0)
+		return;
+
+	wait_for_completion(&vctrl->ov_comp);
+}
+
+static void send_vsync_work(struct work_struct *work)
+{
+	struct vsycn_ctrl *vctrl =
+		container_of(work, typeof(*vctrl), vsync_work);
+	char buf[64];
+	char *envp[2];
+
+	snprintf(buf, sizeof(buf), "VSYNC=%llu",
+				ktime_to_ns(vctrl->vsync_time));
+	envp[0] = buf;
+	envp[1] = NULL;
+	kobject_uevent_env(&vctrl->dev->kobj, KOBJ_CHANGE, envp);
+}
+
+void mdp4_lcdc_vsync_init(int cndx)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	pr_info("%s: ndx=%d\n", __func__, cndx);
+
+	vctrl = &vsync_ctrl_db[cndx];
+	if (vctrl->inited)
+		return;
+
+	vctrl->inited = 1;
+	vctrl->update_ndx = 0;
+	mutex_init(&vctrl->update_lock);
+	init_completion(&vctrl->vsync_comp);
+	init_completion(&vctrl->dmap_comp);
+	init_completion(&vctrl->ov_comp);
+	atomic_set(&vctrl->suspend, 0);
+	spin_lock_init(&vctrl->spin_lock);
+	INIT_WORK(&vctrl->vsync_work, send_vsync_work);
+}
+
+void mdp4_lcdc_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
+{
+	struct vsycn_ctrl *vctrl;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out of range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+
+	vctrl = &vsync_ctrl_db[cndx];
+	vctrl->base_pipe = pipe;
+}
+
+int mdp4_lcdc_on(struct platform_device *pdev)
 {
 	int lcdc_width;
 	int lcdc_height;
@@ -90,8 +445,11 @@
 	struct fb_var_screeninfo *var;
 	struct msm_fb_data_type *mfd;
 	struct mdp4_overlay_pipe *pipe;
-	int ret;
+	int ret = 0;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
 
+	vctrl = &vsync_ctrl_db[cndx];
 	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
 
 	if (!mfd)
@@ -100,21 +458,20 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
+	vctrl->mfd = mfd;
+	vctrl->dev = mfd->fbi->dev;
+
+	/* mdp clock on */
+	mdp_clk_ctrl(1);
+
 	fbi = mfd->fbi;
 	var = &fbi->var;
 
-	/* MDP cmd block enable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-	if (is_mdp4_hw_reset()) {
-		mdp4_hw_init();
-		outpdw(MDP_BASE + 0x0038, mdp4_display_intf);
-	}
-
 	bpp = fbi->var.bits_per_pixel / 8;
 	buf = (uint8 *) fbi->fix.smem_start;
 	buf_offset = calc_fb_offset(mfd, fbi, bpp);
 
-	if (lcdc_pipe == NULL) {
+	if (vctrl->base_pipe == NULL) {
 		ptype = mdp4_overlay_format2type(mfd->fb_imgType);
 		if (ptype < 0)
 			printk(KERN_INFO "%s: format2type failed\n", __func__);
@@ -129,16 +486,17 @@
 		ret = mdp4_overlay_format2pipe(pipe);
 		if (ret < 0)
 			printk(KERN_INFO "%s: format2pipe failed\n", __func__);
-		lcdc_pipe = pipe; /* keep it */
-		init_completion(&lcdc_comp);
 
 		mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
 		pipe->ov_blt_addr = 0;
 		pipe->dma_blt_addr = 0;
+
+		vctrl->base_pipe = pipe; /* keep it */
 	} else {
-		pipe = lcdc_pipe;
+		pipe = vctrl->base_pipe;
 	}
 
+
 	pipe->src_height = fbi->var.yres;
 	pipe->src_width = fbi->var.xres;
 	pipe->src_h = fbi->var.yres;
@@ -154,14 +512,16 @@
 	pipe->srcp0_ystride = fbi->fix.line_length;
 	pipe->bpp = bpp;
 
+	atomic_set(&vctrl->suspend, 0);
+
 	mdp4_overlay_dmap_xy(pipe);
 	mdp4_overlay_dmap_cfg(mfd, 1);
-
 	mdp4_overlay_rgb_setup(pipe);
+	mdp4_overlayproc_cfg(pipe);
+
 	mdp4_overlay_reg_flush(pipe, 1);
 	mdp4_mixer_stage_up(pipe);
 
-	mdp4_overlayproc_cfg(pipe);
 
 	/*
 	 * LCDC timing setting
@@ -238,6 +598,7 @@
 	ctrl_polarity =
 	    (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
 
+	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
 	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x4, hsync_ctrl);
 	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x8, vsync_period);
 	MDP_OUTP(MDP_BASE + LCDC_BASE + 0xc, vsync_pulse_width * hsync_period);
@@ -251,69 +612,56 @@
 	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x1c, active_hctl);
 	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x20, active_v_start);
 	MDP_OUTP(MDP_BASE + LCDC_BASE + 0x24, active_v_end);
-
-	mdp4_overlay_reg_flush(pipe, 1);
-	mdp4_mixer_stage_up(pipe);
-
-#ifdef CONFIG_MSM_BUS_SCALING
-	mdp_bus_scale_update_request(2);
-#endif
-	mdp_histogram_ctrl_all(TRUE);
-
-	ret = panel_next_on(pdev);
-	if (ret == 0)
-		mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-
-	/* MDP cmd block disable */
 	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
+	mdp_histogram_ctrl_all(TRUE);
 	return ret;
 }
 
-int mdp_lcdc_off(struct platform_device *pdev)
+int mdp4_lcdc_off(struct platform_device *pdev)
 {
 	int ret = 0;
+	int cndx = 0;
 	struct msm_fb_data_type *mfd;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
 
 	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
 
-	mutex_lock(&mfd->dma->ov_mutex);
+	atomic_set(&vctrl->suspend, 1);
 
-	/* MDP cmd block enable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+	while (vctrl->wait_vsync_cnt)
+		msleep(20);	/* >= 17 ms */
+
 	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);
+
 	lcdc_enabled = 0;
-	/* MDP cmd block disable */
-	mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
 	mdp_histogram_ctrl_all(FALSE);
-	ret = panel_next_off(pdev);
 
-	mutex_unlock(&mfd->dma->ov_mutex);
-
-	/* delay to make sure the last frame finishes */
-	msleep(20);
-
-	/* dis-engage rgb0 from mixer0 */
-	if (lcdc_pipe) {
+	if (pipe) {
 		if (mfd->ref_cnt == 0) {
 			/* adb stop */
-			if (lcdc_pipe->pipe_type == OVERLAY_TYPE_BF)
-				mdp4_overlay_borderfill_stage_down(lcdc_pipe);
+			if (pipe->pipe_type == OVERLAY_TYPE_BF)
+				mdp4_overlay_borderfill_stage_down(pipe);
 
-			/* lcdc_pipe == rgb1 */
-			mdp4_overlay_unset_mixer(lcdc_pipe->mixer_num);
-			lcdc_pipe = NULL;
+			mdp4_overlay_unset_mixer(pipe->mixer_num);
+			vctrl->base_pipe = NULL;
 		} else {
-			mdp4_mixer_stage_down(lcdc_pipe);
-			mdp4_iommu_unmap(lcdc_pipe);
+			/* system suspending */
+			mdp4_mixer_stage_down(vctrl->base_pipe);
+			mdp4_overlay_iommu_pipe_free(
+				vctrl->base_pipe->pipe_ndx, 1);
 		}
 	}
 
-#ifdef CONFIG_MSM_BUS_SCALING
-	mdp_bus_scale_update_request(0);
-#endif
+	vctrl->fake_vsync = 1;
+
+	/* MDP clock disable */
+	mdp_clk_ctrl(0);
+	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
 
 	return ret;
 }
@@ -324,11 +672,9 @@
 	int bpp;
 	char *overlay_base;
 
-
 	if (pipe->ov_blt_addr == 0)
 		return;
 
-
 #ifdef BLT_RGB565
 	bpp = 2; /* overlay ouput is RGB565 */
 #else
@@ -353,7 +699,6 @@
 	if (pipe->ov_blt_addr == 0)
 		return;
 
-
 #ifdef BLT_RGB565
 	bpp = 2; /* overlay ouput is RGB565 */
 #else
@@ -368,97 +713,9 @@
 	MDP_OUTP(MDP_BASE + 0x90008, addr);
 }
 
-/*
- * mdp4_overlay_lcdc_wait4event:
- * INTR_DMA_P_DONE and INTR_PRIMARY_VSYNC event only
- * no INTR_OVERLAY0_DONE event allowed.
- */
-static void mdp4_overlay_lcdc_wait4event(struct msm_fb_data_type *mfd,
-					int intr_done)
+void mdp4_overlay_lcdc_set_perf(struct msm_fb_data_type *mfd)
 {
-	unsigned long flag;
-	unsigned int data;
-
-	data = inpdw(MDP_BASE + LCDC_BASE);
-	data &= 0x01;
-	if (data == 0)	/* timing generator disabled */
-		return;
-
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	INIT_COMPLETION(lcdc_comp);
-	mfd->dma->waiting = TRUE;
-	outp32(MDP_INTR_CLEAR, intr_done);
-	mdp_intr_mask |= intr_done;
-	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-	mdp_enable_irq(MDP_DMA2_TERM);  /* enable intr */
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-	wait_for_completion(&lcdc_comp);
-	mdp_disable_irq(MDP_DMA2_TERM);
-}
-
-static void mdp4_overlay_lcdc_dma_busy_wait(struct msm_fb_data_type *mfd)
-{
-	unsigned long flag;
-	int need_wait = 0;
-
-	pr_debug("%s: start pid=%d\n", __func__, current->pid);
-
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (mfd->dma->busy == TRUE) {
-		INIT_COMPLETION(mfd->dma->comp);
-		need_wait++;
-	}
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
-
-	if (need_wait) {
-		/* wait until DMA finishes the current job */
-		pr_debug("%s: pending pid=%d\n", __func__, current->pid);
-		wait_for_completion(&mfd->dma->comp);
-	}
-	pr_debug("%s: done pid=%d\n", __func__, current->pid);
-}
-
-void mdp4_overlay_lcdc_start(void)
-{
-	if (!lcdc_enabled) {
-		/* enable LCDC block */
-		mdp4_iommu_attach();
-		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
-		MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
-		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
-		lcdc_enabled = 1;
-	}
-}
-
-void mdp4_overlay_lcdc_vsync_push(struct msm_fb_data_type *mfd,
-			struct mdp4_overlay_pipe *pipe)
-{
-	unsigned long flag;
-
-	if (pipe->flags & MDP_OV_PLAY_NOWAIT)
-		return;
-
-	if (lcdc_pipe->ov_blt_addr) {
-		mdp4_overlay_lcdc_dma_busy_wait(mfd);
-
-		mdp4_lcdc_blt_ov_update(lcdc_pipe);
-		lcdc_pipe->ov_cnt++;
-
-		spin_lock_irqsave(&mdp_spin_lock, flag);
-		outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
-		mdp_intr_mask |= INTR_OVERLAY0_DONE;
-		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-		mdp_enable_irq(MDP_OVERLAY0_TERM);
-		mfd->dma->busy = TRUE;
-		mb();	/* make sure all registers updated */
-		spin_unlock_irqrestore(&mdp_spin_lock, flag);
-		outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
-		mdp4_stat.kickoff_ov0++;
-		mb();
-		mdp4_overlay_lcdc_wait4event(mfd, INTR_DMA_P_DONE);
-	} else {
-		mdp4_overlay_lcdc_wait4event(mfd, INTR_PRIMARY_VSYNC);
-	}
+	/* change mdp clk while mdp is idle */
 	mdp4_set_perf_level();
 }
 
@@ -467,118 +724,131 @@
  */
 void mdp4_primary_vsync_lcdc(void)
 {
-	complete_all(&lcdc_comp);
+	int cndx;
+	struct vsycn_ctrl *vctrl;
+
+	cndx = 0;
+	vctrl = &vsync_ctrl_db[cndx];
+	pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());
+	vctrl->vsync_time = ktime_get();
+	schedule_work(&vctrl->vsync_work);
+
+	spin_lock(&vctrl->spin_lock);
+	if (vctrl->wait_vsync_cnt) {
+		complete_all(&vctrl->vsync_comp);
+		vctrl->wait_vsync_cnt = 0;
+	}
+
+	spin_unlock(&vctrl->spin_lock);
 }
 
 /*
  * mdp4_dma_p_done_lcdc: called from isr
  */
-void mdp4_dma_p_done_lcdc(void)
+void mdp4_dmap_done_lcdc(int cndx)
 {
-	complete_all(&lcdc_comp);
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+
+	if (cndx >= MAX_CONTROLLER) {
+		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
+		return;
+	}
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
+	if (vctrl->blt_change) {
+		mdp4_overlayproc_cfg(pipe);
+		mdp4_overlay_dmap_xy(pipe);
+		if (pipe->ov_blt_addr) {
+			mdp4_lcdc_blt_ov_update(pipe);
+			pipe->ov_cnt++;
+			/* Prefill one frame */
+			vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+			/* kickoff overlay0 engine */
+			mdp4_stat.kickoff_ov0++;
+			vctrl->ov_koff++;       /* make up for prefill */
+			outpdw(MDP_BASE + 0x0004, 0);
+		}
+		vctrl->blt_change = 0;
+	}
+
+	complete_all(&vctrl->dmap_comp);
+	mdp4_overlay_dma_commit(cndx);
+	spin_unlock(&vctrl->spin_lock);
 }
 
 /*
  * mdp4_overlay0_done_lcdc: called from isr
  */
-void mdp4_overlay0_done_lcdc(struct mdp_dma_data *dma)
+void mdp4_overlay0_done_lcdc(int cndx)
 {
-	spin_lock(&mdp_spin_lock);
-	dma->busy = FALSE;
-	if (lcdc_pipe->ov_blt_addr == 0) {
-		spin_unlock(&mdp_spin_lock);
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	spin_lock(&vctrl->spin_lock);
+	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
+	vctrl->ov_done++;
+	complete_all(&vctrl->ov_comp);
+	if (pipe->ov_blt_addr == 0) {
+		spin_unlock(&vctrl->spin_lock);
 		return;
 	}
-	mdp4_lcdc_blt_dmap_update(lcdc_pipe);
-	lcdc_pipe->dmap_cnt++;
-	mdp_disable_irq_nosync(MDP_OVERLAY0_TERM);
-	spin_unlock(&mdp_spin_lock);
-	complete(&dma->comp);
+
+	mdp4_lcdc_blt_dmap_update(pipe);
+	pipe->dmap_cnt++;
+	spin_unlock(&vctrl->spin_lock);
 }
 
-static void mdp4_overlay_lcdc_prefill(struct msm_fb_data_type *mfd)
-{
-	unsigned long flag;
-
-	if (lcdc_pipe->ov_blt_addr) {
-		mdp4_overlay_lcdc_dma_busy_wait(mfd);
-
-		mdp4_lcdc_blt_ov_update(lcdc_pipe);
-		lcdc_pipe->ov_cnt++;
-
-		spin_lock_irqsave(&mdp_spin_lock, flag);
-		outp32(MDP_INTR_CLEAR, INTR_OVERLAY0_DONE);
-		mdp_intr_mask |= INTR_OVERLAY0_DONE;
-		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-		mdp_enable_irq(MDP_OVERLAY0_TERM);
-		mfd->dma->busy = TRUE;
-		mb();	/* make sure all registers updated */
-		spin_unlock_irqrestore(&mdp_spin_lock, flag);
-		outpdw(MDP_BASE + 0x0004, 0); /* kickoff overlay engine */
-		mdp4_stat.kickoff_ov0++;
-		mb();
-	}
-}
-/*
- * make sure the WRITEBACK_SIZE defined at boardfile
- * has enough space h * w * 3 * 2
- */
 static void mdp4_lcdc_do_blt(struct msm_fb_data_type *mfd, int enable)
 {
 	unsigned long flag;
-	int change = 0;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
+	struct mdp4_overlay_pipe *pipe;
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
 
 	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);
 
-	if (!mfd->ov0_wb_buf->write_addr) {
-		pr_debug("%s: no blt_base assigned\n", __func__);
+	if (mfd->ov0_wb_buf->write_addr == 0) {
+		pr_info("%s: no blt_base assigned\n", __func__);
 		return;
 	}
 
-	spin_lock_irqsave(&mdp_spin_lock, flag);
-	if (enable && lcdc_pipe->ov_blt_addr == 0) {
-		lcdc_pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
-		lcdc_pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
-		change++;
-		lcdc_pipe->blt_cnt = 0;
-		lcdc_pipe->ov_cnt = 0;
-		lcdc_pipe->dmap_cnt = 0;
+	spin_lock_irqsave(&vctrl->spin_lock, flag);
+	if (enable && pipe->ov_blt_addr == 0) {
+		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
+		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
+		pipe->ov_cnt = 0;
+		pipe->dmap_cnt = 0;
+		vctrl->ov_koff = 0;
+		vctrl->ov_done = 0;
+		vctrl->blt_free = 0;
 		mdp4_stat.blt_lcdc++;
-	} else if (enable == 0 && lcdc_pipe->ov_blt_addr) {
-		lcdc_pipe->ov_blt_addr = 0;
-		lcdc_pipe->dma_blt_addr = 0;
-		change++;
+		vctrl->blt_change++;
+	} else if (enable == 0 && pipe->ov_blt_addr) {
+		pipe->ov_blt_addr = 0;
+		pipe->dma_blt_addr = 0;
+		vctrl->blt_free = 4;    /* 4 commits to free wb buf */
+		vctrl->blt_change++;
 	}
-	pr_info("%s: ov_blt_addr=%x\n", __func__, (int)lcdc_pipe->ov_blt_addr);
-	spin_unlock_irqrestore(&mdp_spin_lock, flag);
 
-	if (!change)
+	pr_info("%s: enable=%d change=%d blt_addr=%x\n", __func__,
+		enable, vctrl->blt_change, (int)pipe->ov_blt_addr);
+
+	if (!vctrl->blt_change) {
+		spin_unlock_irqrestore(&vctrl->spin_lock, flag);
 		return;
-
-	if (lcdc_enabled) {
-		mdp4_overlay_lcdc_wait4event(mfd, INTR_DMA_P_DONE);
-		MDP_OUTP(MDP_BASE + LCDC_BASE, 0);	/* stop lcdc */
-		msleep(20);
 	}
 
-	mdp4_overlay_dmap_xy(lcdc_pipe);
-	mdp4_overlayproc_cfg(lcdc_pipe);
-	if (lcdc_pipe->ov_blt_addr) {
-		mdp4_overlay_lcdc_prefill(mfd);
-		mdp4_overlay_lcdc_prefill(mfd);
-	}
-	MDP_OUTP(MDP_BASE + LCDC_BASE, 1);	/* start lcdc */
-}
-
-int mdp4_lcdc_overlay_blt_offset(struct msm_fb_data_type *mfd,
-					struct msmfb_overlay_blt *req)
-{
-	req->offset = 0;
-	req->width = lcdc_pipe->src_width;
-	req->height = lcdc_pipe->src_height;
-	req->bpp = lcdc_pipe->bpp;
-
-	return sizeof(*req);
+	spin_unlock_irqrestore(&vctrl->spin_lock, flag);
 }
 
 void mdp4_lcdc_overlay_blt(struct msm_fb_data_type *mfd,
@@ -603,36 +873,36 @@
 	uint8 *buf;
 	unsigned int buf_offset;
 	int bpp;
+	int cndx = 0;
+	struct vsycn_ctrl *vctrl;
 	struct mdp4_overlay_pipe *pipe;
 
-	if (!mfd->panel_power_on)
+
+	vctrl = &vsync_ctrl_db[cndx];
+	pipe = vctrl->base_pipe;
+
+	if (!pipe || !mfd->panel_power_on)
 		return;
 
-	/* no need to power on cmd block since it's lcdc mode */
-	bpp = fbi->var.bits_per_pixel / 8;
-	buf = (uint8 *) fbi->fix.smem_start;
-	buf_offset = calc_fb_offset(mfd, fbi, bpp);
+	pr_debug("%s: cpu=%d pid=%d\n", __func__,
+			smp_processor_id(), current->pid);
+	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
+		bpp = fbi->var.bits_per_pixel / 8;
+		buf = (uint8 *) fbi->fix.smem_start;
+		buf_offset = calc_fb_offset(mfd, fbi, bpp);
 
-	mutex_lock(&mfd->dma->ov_mutex);
+		if (mfd->display_iova)
+			pipe->srcp0_addr = mfd->display_iova + buf_offset;
+		else
+			pipe->srcp0_addr = (uint32)(buf + buf_offset);
 
-	pipe = lcdc_pipe;
-	if (pipe->pipe_used == 0 ||
-			pipe->mixer_stage != MDP4_MIXER_STAGE_BASE) {
-		pr_err("%s: NOT baselayer\n", __func__);
-		mutex_unlock(&mfd->dma->ov_mutex);
-		return;
+		mdp4_lcdc_pipe_queue(0, pipe);
 	}
 
-	if (mfd->display_iova)
-		pipe->srcp0_addr = mfd->display_iova + buf_offset;
-	else
-		pipe->srcp0_addr = (uint32)(buf + buf_offset);
+	mdp4_lcdc_pipe_commit();
 
-	mdp4_overlay_rgb_setup(pipe);
-	mdp4_overlay_reg_flush(pipe, 1);
-	mdp4_mixer_stage_up(pipe);
-	mdp4_overlay_lcdc_start();
-	mdp4_overlay_lcdc_vsync_push(mfd, pipe);
-	mdp4_iommu_unmap(pipe);
-	mutex_unlock(&mfd->dma->ov_mutex);
+	if (pipe->ov_blt_addr)
+		mdp4_lcdc_wait4ov(0);
+	else
+		mdp4_lcdc_wait4dmap(0);
 }
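
The hunks above replace the old lcdc_comp/dma-busy handshake with a per-controller
vsycn_ctrl whose completion is signalled from the vsync ISR and waited on from
thread context. A minimal standalone sketch of that handshake (struct and function
names here are illustrative, not the driver's; it assumes the lock and completion
are initialised at setup with spin_lock_init()/init_completion()):

#include <linux/spinlock.h>
#include <linux/completion.h>

struct vsync_wait {
	spinlock_t lock;		/* protects waiters */
	int waiters;			/* pending wait4vsync callers */
	struct completion comp;		/* signalled once per vsync */
};

/* ISR side: wake everyone who armed a wait since the last vsync */
static void vsync_wait_signal(struct vsync_wait *v)
{
	spin_lock(&v->lock);
	if (v->waiters) {
		complete_all(&v->comp);
		v->waiters = 0;
	}
	spin_unlock(&v->lock);
}

/* thread side: re-arm the completion, then block until the next vsync */
static void vsync_wait_block(struct vsync_wait *v)
{
	unsigned long flags;

	spin_lock_irqsave(&v->lock, flags);
	if (v->waiters == 0)
		INIT_COMPLETION(v->comp);
	v->waiters++;
	spin_unlock_irqrestore(&v->lock, flags);

	wait_for_completion(&v->comp);
}
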
diff --git a/drivers/video/msm/mdp4_overlay_mddi.c b/drivers/video/msm/mdp4_overlay_mddi.c
index c4e6793..103419e 100644
--- a/drivers/video/msm/mdp4_overlay_mddi.c
+++ b/drivers/video/msm/mdp4_overlay_mddi.c
@@ -354,6 +354,10 @@
 	outpdw(overlay_base + 0x001c, addr2);
 }
 
+void mdp4_primary_rdptr(void)
+{
+}
+
 /*
  * mdp4_dmap_done_mddi: called from isr
  */
diff --git a/drivers/video/msm/mdp4_util.c b/drivers/video/msm/mdp4_util.c
index 4c0e28f..e76b8ba 100644
--- a/drivers/video/msm/mdp4_util.c
+++ b/drivers/video/msm/mdp4_util.c
@@ -409,56 +409,69 @@
 		goto out;
 
 	panel = mdp4_overlay_panel_list();
-	if (isr & INTR_PRIMARY_VSYNC) {
-		mdp4_stat.intr_vsync_p++;
+
+	if (isr & INTR_DMA_P_DONE) {
+		mdp4_stat.intr_dma_p++;
 		dma = &dma2_data;
-		spin_lock(&mdp_spin_lock);
-		mdp_intr_mask &= ~INTR_PRIMARY_VSYNC;
-		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-		dma->waiting = FALSE;
 		if (panel & MDP4_PANEL_LCDC)
-			mdp4_primary_vsync_lcdc();
+			mdp4_dmap_done_lcdc(0);
+#ifdef CONFIG_FB_MSM_OVERLAY
 #ifdef CONFIG_FB_MSM_MIPI_DSI
 		else if (panel & MDP4_PANEL_DSI_VIDEO)
-			mdp4_primary_vsync_dsi_video();
+			mdp4_dmap_done_dsi_video(0);
+		else if (panel & MDP4_PANEL_DSI_CMD)
+			mdp4_dmap_done_dsi_cmd(0);
+#else
+		else { /* MDDI */
+			mdp4_dma_p_done_mddi(dma);
+			mdp_pipe_ctrl(MDP_DMA2_BLOCK,
+				MDP_BLOCK_POWER_OFF, TRUE);
+			complete(&dma->comp);
+		}
 #endif
-		spin_unlock(&mdp_spin_lock);
+#else
+		else {
+			spin_lock(&mdp_spin_lock);
+			dma->busy = FALSE;
+			spin_unlock(&mdp_spin_lock);
+			complete(&dma->comp);
+		}
+#endif
 	}
-#ifdef CONFIG_FB_MSM_DTV
-	if (isr & INTR_EXTERNAL_VSYNC) {
-		mdp4_stat.intr_vsync_e++;
-		dma = &dma_e_data;
-		spin_lock(&mdp_spin_lock);
-		mdp_intr_mask &= ~INTR_EXTERNAL_VSYNC;
-		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-		dma->waiting = FALSE;
-		if (panel & MDP4_PANEL_DTV)
-			mdp4_external_vsync_dtv();
-		spin_unlock(&mdp_spin_lock);
-	}
+	if (isr & INTR_DMA_S_DONE) {
+		mdp4_stat.intr_dma_s++;
+#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
+		dma = &dma2_data;
+#else
+		dma = &dma_s_data;
 #endif
 
+		dma->busy = FALSE;
+		mdp_pipe_ctrl(MDP_DMA_S_BLOCK,
+				MDP_BLOCK_POWER_OFF, TRUE);
+		complete(&dma->comp);
+	}
+	if (isr & INTR_DMA_E_DONE) {
+		mdp4_stat.intr_dma_e++;
+		if (panel & MDP4_PANEL_DTV)
+			mdp4_dmae_done_dtv();
+	}
 #ifdef CONFIG_FB_MSM_OVERLAY
 	if (isr & INTR_OVERLAY0_DONE) {
 		mdp4_stat.intr_overlay0++;
 		dma = &dma2_data;
 		if (panel & (MDP4_PANEL_LCDC | MDP4_PANEL_DSI_VIDEO)) {
 			/* disable LCDC interrupt */
-			spin_lock(&mdp_spin_lock);
-			mdp_intr_mask &= ~INTR_OVERLAY0_DONE;
-			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-			dma->waiting = FALSE;
-			spin_unlock(&mdp_spin_lock);
 			if (panel & MDP4_PANEL_LCDC)
-				mdp4_overlay0_done_lcdc(dma);
+				mdp4_overlay0_done_lcdc(0);
 #ifdef CONFIG_FB_MSM_MIPI_DSI
 			else if (panel & MDP4_PANEL_DSI_VIDEO)
-				mdp4_overlay0_done_dsi_video(dma);
+				mdp4_overlay0_done_dsi_video(0);
 #endif
 		} else {        /* MDDI, DSI_CMD  */
 #ifdef CONFIG_FB_MSM_MIPI_DSI
 			if (panel & MDP4_PANEL_DSI_CMD)
-				mdp4_overlay0_done_dsi_cmd(dma);
+				mdp4_overlay0_done_dsi_cmd(0);
 #else
 			if (panel & MDP4_PANEL_MDDI)
 				mdp4_overlay0_done_mddi(dma);
@@ -500,75 +513,20 @@
 #endif
 #endif	/* OVERLAY */
 
-	if (isr & INTR_DMA_P_DONE) {
-		mdp4_stat.intr_dma_p++;
-		dma = &dma2_data;
-		if (panel & MDP4_PANEL_LCDC) {
-			/* disable LCDC interrupt */
-			spin_lock(&mdp_spin_lock);
-			mdp_intr_mask &= ~INTR_DMA_P_DONE;
-			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-			dma->waiting = FALSE;
-			mdp4_dma_p_done_lcdc();
-			spin_unlock(&mdp_spin_lock);
-		}
-#ifdef CONFIG_FB_MSM_OVERLAY
-#ifdef CONFIG_FB_MSM_MIPI_DSI
-		else if (panel & MDP4_PANEL_DSI_VIDEO) {
-			/* disable LCDC interrupt */
-			spin_lock(&mdp_spin_lock);
-			mdp_intr_mask &= ~INTR_DMA_P_DONE;
-			outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-			dma->waiting = FALSE;
-			mdp4_dma_p_done_dsi_video(dma);
-			spin_unlock(&mdp_spin_lock);
-		} else if (panel & MDP4_PANEL_DSI_CMD) {
-			mdp4_dma_p_done_dsi(dma);
-		}
-#else
-		else { /* MDDI */
-			mdp4_dma_p_done_mddi(dma);
-			mdp_pipe_ctrl(MDP_DMA2_BLOCK,
-				MDP_BLOCK_POWER_OFF, TRUE);
-			complete(&dma->comp);
-		}
-#endif
-#else
-		else {
-			spin_lock(&mdp_spin_lock);
-			dma->busy = FALSE;
-			spin_unlock(&mdp_spin_lock);
-			complete(&dma->comp);
-		}
-#endif
+	if (isr & INTR_PRIMARY_VSYNC) {
+		mdp4_stat.intr_vsync_p++;
+		if (panel & MDP4_PANEL_LCDC)
+			mdp4_primary_vsync_lcdc();
+		else if (panel & MDP4_PANEL_DSI_VIDEO)
+			mdp4_primary_vsync_dsi_video();
 	}
-	if (isr & INTR_DMA_S_DONE) {
-		mdp4_stat.intr_dma_s++;
-#if defined(CONFIG_FB_MSM_OVERLAY) && defined(CONFIG_FB_MSM_MDDI)
-		dma = &dma2_data;
-#else
-		dma = &dma_s_data;
+#ifdef CONFIG_FB_MSM_DTV
+	if (isr & INTR_EXTERNAL_VSYNC) {
+		mdp4_stat.intr_vsync_e++;
+		if (panel & MDP4_PANEL_DTV)
+			mdp4_external_vsync_dtv();
+	}
 #endif
-
-		dma->busy = FALSE;
-		mdp_pipe_ctrl(MDP_DMA_S_BLOCK,
-				MDP_BLOCK_POWER_OFF, TRUE);
-		complete(&dma->comp);
-	}
-	if (isr & INTR_DMA_E_DONE) {
-		mdp4_stat.intr_dma_e++;
-		dma = &dma_e_data;
-		spin_lock(&mdp_spin_lock);
-		mdp_intr_mask &= ~INTR_DMA_E_DONE;
-		outp32(MDP_INTR_ENABLE, mdp_intr_mask);
-		dma->busy = FALSE;
-		mdp4_dma_e_done_dtv();
-		if (dma->waiting) {
-			dma->waiting = FALSE;
-			complete(&dma->comp);
-		}
-		spin_unlock(&mdp_spin_lock);
-	}
 	if (isr & INTR_DMA_P_HISTOGRAM) {
 		mdp4_stat.intr_histogram++;
 		ret = mdp_histogram_block2mgmt(MDP_BLOCK_DMA_P, &mgmt);
@@ -593,6 +551,10 @@
 		if (!ret)
 			mdp_histogram_handle_isr(mgmt);
 	}
+	if (isr & INTR_PRIMARY_RDPTR) {
+		mdp4_stat.intr_rdptr++;
+		mdp4_primary_rdptr();
+	}
 
 out:
 	mdp_is_in_isr = FALSE;
@@ -2672,9 +2634,9 @@
 					DISPLAY_READ_DOMAIN, GEN_POOL);
 			}
 			ion_free(mfd->iclient, buf->ihdl);
-			pr_debug("%s:%d free writeback imem\n", __func__,
-				__LINE__);
 			buf->ihdl = NULL;
+			pr_info("%s:%d free ION writeback imem\n",
+					__func__, __LINE__);
 		}
 	} else {
 		if (buf->write_addr) {
@@ -3276,71 +3238,92 @@
 	return valid;
 }
 
-static int mdp4_qseed_write_cfg(struct mdp_qseed_cfg_data *cfg)
+int mdp4_qseed_access_cfg(struct mdp_qseed_cfg *config, uint32_t base)
 {
 	int i, ret = 0;
-	uint32_t base = (uint32_t) (MDP_BASE + mdp_block2base(cfg->block));
 	uint32_t *values;
 
-	if ((cfg->table_num != 1) && (cfg->table_num != 2)) {
+	if ((config->table_num != 1) && (config->table_num != 2)) {
 		ret = -ENOTTY;
 		goto error;
 	}
 
-	if (((cfg->table_num == 1) && (cfg->len != QSEED_TABLE_1_COUNT)) ||
-		((cfg->table_num == 2) && (cfg->len != QSEED_TABLE_2_COUNT))) {
+	if (((config->table_num == 1) && (config->len != QSEED_TABLE_1_COUNT))
+			|| ((config->table_num == 2) &&
+				(config->len != QSEED_TABLE_2_COUNT))) {
 		ret = -EINVAL;
 		goto error;
 	}
 
-	values = kmalloc(cfg->len * sizeof(uint32_t), GFP_KERNEL);
+	values = kmalloc(config->len * sizeof(uint32_t), GFP_KERNEL);
 	if (!values) {
 		ret = -ENOMEM;
 		goto error;
 	}
 
-	ret = copy_from_user(values, cfg->data, sizeof(uint32_t) * cfg->len);
+	base += (config->table_num == 1) ? MDP4_QSEED_TABLE1_OFF :
+							MDP4_QSEED_TABLE2_OFF;
 
-	base += (cfg->table_num == 1) ? MDP4_QSEED_TABLE1_OFF :
-						MDP4_QSEED_TABLE2_OFF;
-	for (i = 0; i < cfg->len; i++) {
-		MDP_OUTP(base , values[i]);
-		base += sizeof(uint32_t);
+	if (config->ops & MDP_PP_OPS_WRITE) {
+		ret = copy_from_user(values, config->data,
+						sizeof(uint32_t) * config->len);
+		if (ret) {
+			pr_warn("%s: Error copying from user, %d\n", __func__,
+									ret);
+			ret = -EINVAL;
+			goto err_mem;
+		}
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		for (i = 0; i < config->len; i++) {
+			if (!(base & 0x3FF))
+				wmb();
+			MDP_OUTP(base, values[i]);
+			base += sizeof(uint32_t);
+		}
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+	} else if (config->ops & MDP_PP_OPS_READ) {
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
+		for (i = 0; i < config->len; i++) {
+			values[i] = inpdw(base);
+			if (!(base & 0x3FF))
+				rmb();
+			base += sizeof(uint32_t);
+		}
+		mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
+		ret = copy_to_user(config->data, values,
+						sizeof(uint32_t) * config->len);
+		if (ret) {
+			pr_warn("%s: Error copying to user, %d\n", __func__, ret);
+			ret = -EINVAL;
+			goto err_mem;
+		}
 	}
 
+err_mem:
 	kfree(values);
 error:
 	return ret;
 }
 
-int mdp4_qseed_cfg(struct mdp_qseed_cfg_data *cfg)
+int mdp4_qseed_cfg(struct mdp_qseed_cfg_data *config)
 {
 	int ret = 0;
+	struct mdp_qseed_cfg *cfg = &config->qseed_data;
+	uint32_t base;
 
-	if (!mdp4_pp_block2qseed(cfg->block)) {
+	if (!mdp4_pp_block2qseed(config->block)) {
 		ret = -ENOTTY;
 		goto error;
 	}
 
-	if (cfg->table_num != 1) {
-		ret = -ENOTTY;
-		pr_info("%s: Only QSEED table1 supported.\n", __func__);
+	if ((cfg->ops & MDP_PP_OPS_READ) && (cfg->ops & MDP_PP_OPS_WRITE)) {
+		ret = -EPERM;
+		pr_warn("%s: Cannot read and write on the same request\n",
+								__func__);
 		goto error;
 	}
-
-	switch ((cfg->ops & 0x6) >> 1) {
-	case 0x1:
-		pr_info("%s: QSEED read not supported\n", __func__);
-		ret = -ENOTTY;
-		break;
-	case 0x2:
-		ret = mdp4_qseed_write_cfg(cfg);
-		if (ret)
-			goto error;
-		break;
-	default:
-		break;
-	}
+	base = (uint32_t) (MDP_BASE + mdp_block2base(config->block));
+	ret = mdp4_qseed_access_cfg(cfg, base);
 
 error:
 	return ret;
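
With the change above, mdp4_qseed_cfg() rejects requests that set both
MDP_PP_OPS_READ and MDP_PP_OPS_WRITE and hands the register walk to
mdp4_qseed_access_cfg(), which issues a barrier at every 1KB boundary. A hedged
sketch of a caller building a write request with the fields visible in this hunk;
the helper name, block id and buffer handling are illustrative only:

/* sketch: one-shot QSEED table-1 write (kernel context assumed) */
static int qseed_write_table1_sketch(u32 block, uint32_t *user_tbl, uint32_t len)
{
	struct mdp_qseed_cfg_data req;

	memset(&req, 0, sizeof(req));
	req.block = block;			/* an MDP block id, e.g. a VG pipe */
	req.qseed_data.table_num = 1;		/* only tables 1 and 2 are accepted */
	req.qseed_data.ops = MDP_PP_OPS_WRITE;	/* never READ and WRITE together */
	req.qseed_data.len = len;		/* must equal QSEED_TABLE_1_COUNT */
	req.qseed_data.data = user_tbl;		/* user pointer, copy_from_user()'d */

	return mdp4_qseed_cfg(&req);
}
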
diff --git a/drivers/video/msm/mdp_debugfs.c b/drivers/video/msm/mdp_debugfs.c
index 4a0ea4c..0fad0a7 100644
--- a/drivers/video/msm/mdp_debugfs.c
+++ b/drivers/video/msm/mdp_debugfs.c
@@ -337,7 +337,7 @@
 	bp += len;
 	dlen -= len;
 	len = snprintf(bp, dlen, "read_ptr: %08lu\n\n",
-					mdp4_stat.intr_rd_ptr);
+					mdp4_stat.intr_rdptr);
 	bp += len;
 	dlen -= len;
 	len = snprintf(bp, dlen, "dsi:\n");
@@ -412,10 +412,14 @@
 					mdp4_stat.overlay_unset[0]);
 	bp += len;
 	dlen -= len;
-	len = snprintf(bp, dlen, "play:  %08lu\n",
+	len = snprintf(bp, dlen, "play:  %08lu\t",
 					mdp4_stat.overlay_play[0]);
 	bp += len;
 	dlen -= len;
+	len = snprintf(bp, dlen, "commit:  %08lu\n",
+					mdp4_stat.overlay_commit[0]);
+	bp += len;
+	dlen -= len;
 
 	len = snprintf(bp, dlen, "overlay1_play:\n");
 	bp += len;
@@ -428,29 +432,56 @@
 					mdp4_stat.overlay_unset[1]);
 	bp += len;
 	dlen -= len;
-	len = snprintf(bp, dlen, "play:  %08lu\n\n",
+	len = snprintf(bp, dlen, "play:  %08lu\t",
 					mdp4_stat.overlay_play[1]);
 
 	bp += len;
 	dlen -= len;
+	len = snprintf(bp, dlen, "commit:  %08lu\n\n",
+					mdp4_stat.overlay_commit[1]);
+	bp += len;
+	dlen -= len;
 
 	len = snprintf(bp, dlen, "frame_push:\n");
 	bp += len;
 	dlen -= len;
-	len = snprintf(bp, dlen, "rgb1:   %08lu\t",
-		       mdp4_stat.pipe[OVERLAY_PIPE_RGB1]);
+	len = snprintf(bp, dlen, "vg1 :   %08lu\t", mdp4_stat.pipe[0]);
 	bp += len;
 	dlen -= len;
-	len = snprintf(bp, dlen, "rgb2:   %08lu\n",
-		       mdp4_stat.pipe[OVERLAY_PIPE_RGB2]);
+	len = snprintf(bp, dlen, "vg2 :   %08lu\t", mdp4_stat.pipe[1]);
 	bp += len;
 	dlen -= len;
-	len = snprintf(bp, dlen, "vg1 :   %08lu\t",
-		       mdp4_stat.pipe[OVERLAY_PIPE_VG1]);
+	len = snprintf(bp, dlen, "vg3 :   %08lu\n", mdp4_stat.pipe[5]);
 	bp += len;
 	dlen -= len;
-	len = snprintf(bp, dlen, "vg2 :   %08lu\n",
-		       mdp4_stat.pipe[OVERLAY_PIPE_VG2]);
+	len = snprintf(bp, dlen, "rgb1:   %08lu\t", mdp4_stat.pipe[2]);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "rgb2:   %08lu\t", mdp4_stat.pipe[3]);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "rgb3:   %08lu\n\n", mdp4_stat.pipe[4]);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "wait4vsync: ");
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "mixer0 : %08lu\t", mdp4_stat.wait4vsync0);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "mixer1: %08lu\n\n", mdp4_stat.wait4vsync1);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "iommu: ");
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "map : %08lu\t", mdp4_stat.iommu_map);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "unmap: %08lu\t", mdp4_stat.iommu_unmap);
+	bp += len;
+	dlen -= len;
+	len = snprintf(bp, dlen, "drop: %08lu\n\n", mdp4_stat.iommu_drop);
 	bp += len;
 	dlen -= len;
 	len = snprintf(bp, dlen, "err_mixer : %08lu\t", mdp4_stat.err_mixer);
diff --git a/drivers/video/msm/mdp_vsync.c b/drivers/video/msm/mdp_vsync.c
index 87e74d9..966b40d 100644
--- a/drivers/video/msm/mdp_vsync.c
+++ b/drivers/video/msm/mdp_vsync.c
@@ -73,6 +73,20 @@
 static unsigned char timer_shutdown_flag;
 static uint32 vsync_cnt_cfg;
 
+
+
+void vsync_clk_enable(void)
+{
+	if (mdp_vsync_clk)
+		clk_prepare_enable(mdp_vsync_clk);
+}
+
+void vsync_clk_disable(void)
+{
+	if (mdp_vsync_clk)
+		clk_disable_unprepare(mdp_vsync_clk);
+}
+
 void mdp_hw_vsync_clk_enable(struct msm_fb_data_type *mfd)
 {
 	if (vsync_clk_status == 1)
diff --git a/drivers/video/msm/mdss/mdss_fb.c b/drivers/video/msm/mdss/mdss_fb.c
index a96bf3a..ee086ad 100644
--- a/drivers/video/msm/mdss/mdss_fb.c
+++ b/drivers/video/msm/mdss/mdss_fb.c
@@ -774,17 +774,23 @@
 	fix->type = panel_info->is_3d_panel;
 	fix->line_length = mdss_fb_line_length(mfd->index, panel_info->xres,
 					       bpp);
-	mfd->var_xres = panel_info->xres;
-	mfd->var_yres = panel_info->yres;
-
-	var->pixclock = mfd->panel_info.clk_rate;
-	mfd->var_pixclock = var->pixclock;
 
 	var->xres = panel_info->xres;
 	var->yres = panel_info->yres;
 	var->xres_virtual = panel_info->xres;
 	var->yres_virtual = panel_info->yres * mfd->fb_page;
 	var->bits_per_pixel = bpp * 8;	/* FrameBuffer color depth */
+	var->upper_margin = panel_info->lcdc.v_front_porch;
+	var->lower_margin = panel_info->lcdc.v_back_porch;
+	var->vsync_len = panel_info->lcdc.v_pulse_width;
+	var->left_margin = panel_info->lcdc.h_front_porch;
+	var->right_margin = panel_info->lcdc.h_back_porch;
+	var->hsync_len = panel_info->lcdc.h_pulse_width;
+	var->pixclock = panel_info->clk_rate / 1000;
+
+	mfd->var_xres = var->xres;
+	mfd->var_yres = var->yres;
+	mfd->var_pixclock = var->pixclock;
 
 	/* id field for fb app  */
 
@@ -796,6 +802,7 @@
 	fbi->flags = FBINFO_FLAG_DEFAULT;
 	fbi->pseudo_palette = mdss_fb_pseudo_palette;
 
+	panel_info->fbi = fbi;
 	mfd->ref_cnt = 0;
 	mfd->panel_power_on = false;
 
diff --git a/drivers/video/msm/mdss/mdss_mdp_overlay.c b/drivers/video/msm/mdss/mdss_mdp_overlay.c
index 43ddb5e..2e9a2dc 100644
--- a/drivers/video/msm/mdss/mdss_mdp_overlay.c
+++ b/drivers/video/msm/mdss/mdss_mdp_overlay.c
@@ -115,11 +115,10 @@
 	if (fmt->is_yuv) {
 		if ((req->src_rect.x & 0x1) || (req->src_rect.y & 0x1) ||
 		    (req->src_rect.w & 0x1) || (req->src_rect.h & 0x1)) {
-			pr_err("invalid odd src resolution\n");
+			pr_err("invalid odd src resolution or coordinates\n");
 			return -EINVAL;
 		}
-		if ((req->dst_rect.x & 0x1) || (req->dst_rect.y & 0x1) ||
-		    (req->dst_rect.w & 0x1) || (req->dst_rect.h & 0x1)) {
+		if ((req->dst_rect.w & 0x1) || (req->dst_rect.h & 0x1)) {
 			pr_err("invalid odd dst resolution\n");
 			return -EINVAL;
 		}
diff --git a/drivers/video/msm/mdss/mdss_mdp_pipe.c b/drivers/video/msm/mdss/mdss_mdp_pipe.c
index 52f4324..d9a148e 100644
--- a/drivers/video/msm/mdss/mdss_mdp_pipe.c
+++ b/drivers/video/msm/mdss/mdss_mdp_pipe.c
@@ -354,11 +354,12 @@
 	u32 chroma_sample;
 
 	if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA) {
-		if (!(pipe->flags & MDP_ROT_90) && (pipe->dst.h != pipe->src.h
-						|| pipe->dst.w != pipe->src.w))
-			return -EINVAL;	/* no scaling supported on dma pipes */
-		else
+		if (pipe->dst.h != pipe->src.h || pipe->dst.w != pipe->src.w) {
+			pr_err("no scaling supported on dma pipe\n");
+			return -EINVAL;
+		} else {
 			return 0;
+		}
 	}
 
 	chroma_sample = pipe->src_fmt->chroma_sample;
@@ -535,10 +536,10 @@
 	pr_debug("pnum=%d format=%d opmode=%x\n", pipe->num, fmt->format,
 			opmode);
 
-	rot90 = !!(pipe->flags & MDP_SOURCE_ROTATED_90);
+	rot90 = !!(pipe->flags & MDP_ROT_90);
 
 	chroma_samp = fmt->chroma_sample;
-	if (rot90) {
+	if (pipe->flags & MDP_SOURCE_ROTATED_90) {
 		if (chroma_samp == MDSS_MDP_CHROMA_H2V1)
 			chroma_samp = MDSS_MDP_CHROMA_H1V2;
 		else if (chroma_samp == MDSS_MDP_CHROMA_H1V2)
diff --git a/drivers/video/msm/mdss/mdss_mdp_rotator.c b/drivers/video/msm/mdss/mdss_mdp_rotator.c
index fc3a843..dc1cb0d 100644
--- a/drivers/video/msm/mdss/mdss_mdp_rotator.c
+++ b/drivers/video/msm/mdss/mdss_mdp_rotator.c
@@ -212,6 +212,7 @@
 		rot_pipe->img_width = rot->img_width;
 		rot_pipe->img_height = rot->img_height;
 		rot_pipe->src = rot->src_rect;
+		rot_pipe->dst = rot->src_rect;
 		rot_pipe->bwc_mode = rot->bwc_mode;
 		rot_pipe->params_changed++;
 	}
diff --git a/drivers/video/msm/mdss/mdss_mdp_wb.c b/drivers/video/msm/mdss/mdss_mdp_wb.c
index da55edc..26e459f 100644
--- a/drivers/video/msm/mdss/mdss_mdp_wb.c
+++ b/drivers/video/msm/mdss/mdss_mdp_wb.c
@@ -60,32 +60,40 @@
 static struct mdss_mdp_wb mdss_mdp_wb_info;
 
 #ifdef DEBUG_WRITEBACK
-/* for debugging: writeback output buffer to framebuffer memory */
+/* for debugging: writeback output buffer to allocated memory */
 static inline
 struct mdss_mdp_data *mdss_mdp_wb_debug_buffer(struct msm_fb_data_type *mfd)
 {
+	static struct ion_handle *ihdl;
 	static void *videomemory;
-	static void *mdss_wb_mem;
-	static struct mdss_mdp_data buffer = {
-		.num_planes = 1,
-	};
-
+	static ion_phys_addr_t mdss_wb_mem;
+	static struct mdss_mdp_data buffer = { .num_planes = 1 };
 	struct fb_info *fbi;
-	int img_size;
-	int offset;
-
+	size_t img_size;
 
 	fbi = mfd->fbi;
 	img_size = fbi->var.xres * fbi->var.yres * fbi->var.bits_per_pixel / 8;
-	offset = fbi->fix.smem_len - img_size;
 
-	videomemory = fbi->screen_base + offset;
-	mdss_wb_mem = (void *)(fbi->fix.smem_start + offset);
+	if (ihdl == NULL) {
+		ihdl = ion_alloc(mfd->iclient, img_size, SZ_4K,
+				 ION_HEAP(ION_SF_HEAP_ID));
+		if (!IS_ERR_OR_NULL(ihdl)) {
+			videomemory = ion_map_kernel(mfd->iclient, ihdl, 0);
+			ion_phys(mfd->iclient, ihdl, &mdss_wb_mem, &img_size);
+		} else {
+			pr_err("unable to alloc fbmem from ion (%p)\n", ihdl);
+			ihdl = NULL;
+		}
+	}
 
-	buffer.p[0].addr = fbi->fix.smem_start + offset;
-	buffer.p[0].len = img_size;
+	if (mdss_wb_mem) {
+		buffer.p[0].addr = (u32) mdss_wb_mem;
+		buffer.p[0].len = img_size;
 
-	return &buffer;
+		return &buffer;
+	}
+
+	return NULL;
 }
 #else
 static inline
@@ -227,6 +235,7 @@
 	}
 
 	node->buf_data.num_planes = 1;
+	node->buf_info = *data;
 	buf = &node->buf_data.p[0];
 	buf->addr = (u32) (data->iova + data->offset);
 	buf->len = UINT_MAX; /* trusted source */
diff --git a/drivers/video/msm/mdss/mdss_panel.h b/drivers/video/msm/mdss/mdss_panel.h
index 3fd943d..0411d8e 100644
--- a/drivers/video/msm/mdss/mdss_panel.h
+++ b/drivers/video/msm/mdss/mdss_panel.h
@@ -161,6 +161,7 @@
 	struct lcdc_panel_info lcdc;
 	struct mipi_panel_info mipi;
 	struct lvds_panel_info lvds;
+	struct fb_info *fbi;
 };
 
 struct mdss_panel_data {
diff --git a/drivers/video/msm/mdss/mdss_wb.c b/drivers/video/msm/mdss/mdss_wb.c
index 3be4525..a26d339 100644
--- a/drivers/video/msm/mdss/mdss_wb.c
+++ b/drivers/video/msm/mdss/mdss_wb.c
@@ -73,7 +73,7 @@
 	pdata->panel_info.type = WRITEBACK_PANEL;
 	pdata->panel_info.clk_rate = 74250000;
 	pdata->panel_info.pdest = DISPLAY_3;
-	pdata->panel_info.out_format = MDP_RGB_888;
+	pdata->panel_info.out_format = MDP_Y_CBCR_H2V2;
 
 	pdata->on = mdss_wb_on;
 	pdata->off = mdss_wb_off;
diff --git a/drivers/video/msm/mipi_NT35510.c b/drivers/video/msm/mipi_NT35510.c
index e605aed..04178fa 100644
--- a/drivers/video/msm/mipi_NT35510.c
+++ b/drivers/video/msm/mipi_NT35510.c
@@ -486,22 +486,22 @@
 		rotate = mipi_nt35510_pdata->rotate_panel();
 
 	if (mipi->mode == DSI_VIDEO_MODE) {
-		mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+		mipi_dsi_cmds_tx(&nt35510_tx_buf,
 			nt35510_video_display_on_cmds,
 			ARRAY_SIZE(nt35510_video_display_on_cmds));
 
 		if (rotate) {
-			mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+			mipi_dsi_cmds_tx(&nt35510_tx_buf,
 				nt35510_video_display_on_cmds_rotate,
 			ARRAY_SIZE(nt35510_video_display_on_cmds_rotate));
 		}
 	} else if (mipi->mode == DSI_CMD_MODE) {
-		mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+		mipi_dsi_cmds_tx(&nt35510_tx_buf,
 			nt35510_cmd_display_on_cmds,
 			ARRAY_SIZE(nt35510_cmd_display_on_cmds));
 
 		if (rotate) {
-			mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf,
+			mipi_dsi_cmds_tx(&nt35510_tx_buf,
 				nt35510_cmd_display_on_cmds_rotate,
 			ARRAY_SIZE(nt35510_cmd_display_on_cmds_rotate));
 		}
@@ -523,7 +523,7 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(mfd, &nt35510_tx_buf, nt35510_display_off_cmds,
+	mipi_dsi_cmds_tx(&nt35510_tx_buf, nt35510_display_off_cmds,
 			ARRAY_SIZE(nt35510_display_off_cmds));
 
 	pr_debug("mipi_nt35510_lcd_off X\n");
diff --git a/drivers/video/msm/mipi_dsi.c b/drivers/video/msm/mipi_dsi.c
index ff8fd17..b4fb930 100644
--- a/drivers/video/msm/mipi_dsi.c
+++ b/drivers/video/msm/mipi_dsi.c
@@ -80,25 +80,6 @@
 	mdp4_overlay_dsi_state_set(ST_DSI_SUSPEND);
 
 	/*
-	 * Description: dsi clock is need to perform shutdown.
-	 * mdp4_dsi_cmd_dma_busy_wait() will enable dsi clock if disabled.
-	 * also, wait until dma (overlay and dmap) finish.
-	 */
-	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
-		if (mdp_rev >= MDP_REV_41) {
-			mdp4_dsi_cmd_del_timer();
-			mdp4_dsi_cmd_dma_busy_wait(mfd);
-			mdp4_dsi_blt_dmap_busy_wait(mfd);
-			mipi_dsi_mdp_busy_wait(mfd);
-		} else {
-			mdp3_dsi_cmd_dma_busy_wait(mfd);
-		}
-	} else {
-		/* video mode, wait until fifo cleaned */
-		mipi_dsi_controller_cfg(0);
-	}
-
-	/*
 	 * Description: change to DSI_CMD_MODE since it is needed to
 	 * tx the DCS display off command to the panel
 	 */
diff --git a/drivers/video/msm/mipi_dsi.h b/drivers/video/msm/mipi_dsi.h
index 2bc49c0..ebbf362 100644
--- a/drivers/video/msm/mipi_dsi.h
+++ b/drivers/video/msm/mipi_dsi.h
@@ -264,8 +264,7 @@
 void mipi_dsi_bist_ctrl(void);
 int mipi_dsi_buf_alloc(struct dsi_buf *, int size);
 int mipi_dsi_cmd_dma_add(struct dsi_buf *dp, struct dsi_cmd_desc *cm);
-int mipi_dsi_cmds_tx(struct msm_fb_data_type *mfd,
-		struct dsi_buf *dp, struct dsi_cmd_desc *cmds, int cnt);
+int mipi_dsi_cmds_tx(struct dsi_buf *dp, struct dsi_cmd_desc *cmds, int cnt);
 
 int mipi_dsi_cmd_dma_tx(struct dsi_buf *dp);
 int mipi_dsi_cmd_reg_tx(uint32 data);
@@ -278,10 +277,14 @@
 void mipi_dsi_cmd_mode_ctrl(int enable);
 void mdp4_dsi_cmd_trigger(void);
 void mipi_dsi_cmd_mdp_start(void);
+int mipi_dsi_ctrl_lock(int mdp);
+int mipi_dsi_ctrl_lock_query(void);
 void mipi_dsi_cmd_bta_sw_trigger(void);
 void mipi_dsi_ack_err_status(void);
 void mipi_dsi_set_tear_on(struct msm_fb_data_type *mfd);
 void mipi_dsi_set_tear_off(struct msm_fb_data_type *mfd);
+void mipi_dsi_set_backlight(struct msm_fb_data_type *mfd, int level);
+void mipi_dsi_cmd_backlight_tx(int level);
 void mipi_dsi_clk_enable(void);
 void mipi_dsi_clk_disable(void);
 void mipi_dsi_pre_kickoff_action(void);
@@ -310,6 +313,7 @@
 void cont_splash_clk_ctrl(int enable);
 void mipi_dsi_turn_on_clks(void);
 void mipi_dsi_turn_off_clks(void);
+void mipi_dsi_clk_cfg(int on);
 
 #ifdef CONFIG_FB_MSM_MDP303
 void update_lane_config(struct msm_panel_info *pinfo);
diff --git a/drivers/video/msm/mipi_dsi_host.c b/drivers/video/msm/mipi_dsi_host.c
index 7f1a435..4afffb0 100644
--- a/drivers/video/msm/mipi_dsi_host.c
+++ b/drivers/video/msm/mipi_dsi_host.c
@@ -46,6 +46,7 @@
 static spinlock_t dsi_irq_lock;
 static spinlock_t dsi_mdp_lock;
 spinlock_t dsi_clk_lock;
+static int dsi_ctrl_lock;
 static int dsi_mdp_busy;
 
 static struct list_head pre_kickoff_list;
@@ -146,6 +147,30 @@
 	spin_unlock(&dsi_irq_lock);
 }
 
+void mipi_dsi_clk_cfg(int on)
+{
+	unsigned long flags;
+	static int dsi_clk_cnt;
+
+	spin_lock_irqsave(&mdp_spin_lock, flags);
+	if (on) {
+		if (dsi_clk_cnt == 0) {
+			mipi_dsi_ahb_ctrl(1);
+			mipi_dsi_clk_enable();
+		}
+		dsi_clk_cnt++;
+	} else {
+		if (dsi_clk_cnt) {
+			dsi_clk_cnt--;
+			if (dsi_clk_cnt == 0) {
+				mipi_dsi_clk_disable();
+				mipi_dsi_ahb_ctrl(0);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&mdp_spin_lock, flags);
+}
+
 void mipi_dsi_turn_on_clks(void)
 {
 	mipi_dsi_ahb_ctrl(1);
@@ -982,6 +1007,27 @@
 	wmb();
 }
 
+int mipi_dsi_ctrl_lock(int mdp)
+{
+	unsigned long flag;
+	int lock = 0;
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	if (dsi_ctrl_lock == FALSE) {
+		dsi_ctrl_lock = TRUE;
+		lock = 1;
+		if (lock && mdp)	/* mdp pixel */
+			mipi_dsi_enable_irq();
+	}
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+	return lock;
+}
+
+int mipi_dsi_ctrl_lock_query(void)
+{
+	return dsi_ctrl_lock;
+}
+
 void mipi_dsi_mdp_busy_wait(struct msm_fb_data_type *mfd)
 {
 	unsigned long flag;
@@ -1011,19 +1057,14 @@
 {
 	unsigned long flag;
 
-
-	if (!in_interrupt())
-		mipi_dsi_pre_kickoff_action();
-
 	mipi_dsi_mdp_stat_inc(STAT_DSI_START);
 
 	spin_lock_irqsave(&dsi_mdp_lock, flag);
-	mipi_dsi_enable_irq();
-	dsi_mdp_busy = TRUE;
+	mipi_dsi_enable_irq();
+	dsi_mdp_busy = TRUE;
 	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
 }
 
-
 void mipi_dsi_cmd_bta_sw_trigger(void)
 {
 	uint32 data;
@@ -1055,13 +1096,13 @@
 void mipi_dsi_set_tear_on(struct msm_fb_data_type *mfd)
 {
 	mipi_dsi_buf_init(&dsi_tx_buf);
-	mipi_dsi_cmds_tx(mfd, &dsi_tx_buf, &dsi_tear_on_cmd, 1);
+	mipi_dsi_cmds_tx(&dsi_tx_buf, &dsi_tear_on_cmd, 1);
 }
 
 void mipi_dsi_set_tear_off(struct msm_fb_data_type *mfd)
 {
 	mipi_dsi_buf_init(&dsi_tx_buf);
-	mipi_dsi_cmds_tx(mfd, &dsi_tx_buf, &dsi_tear_off_cmd, 1);
+	mipi_dsi_cmds_tx(&dsi_tx_buf, &dsi_tear_off_cmd, 1);
 }
 
 int mipi_dsi_cmd_reg_tx(uint32 data)
@@ -1093,12 +1134,64 @@
 	return 4;
 }
 
+static char led_pwm1[2] = {0x51, 0x0};	/* DTYPE_DCS_WRITE1 */
+
+static struct dsi_cmd_desc backlight_cmd = {
+	DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(led_pwm1), led_pwm1};
+
+/*
+ * mipi_dsi_cmd_backlight_tx:
+ * thread context only
+ */
+void mipi_dsi_cmd_backlight_tx(int level)
+{
+	struct dsi_buf *tp;
+	struct dsi_cmd_desc *cmd;
+	unsigned long flag;
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	dsi_mdp_busy = TRUE;
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
+	led_pwm1[1] = (unsigned char)(level);
+	tp = &dsi_tx_buf;
+	cmd = &backlight_cmd;
+	mipi_dsi_buf_init(&dsi_tx_buf);
+
+	if (tp->dmap) {
+		dma_unmap_single(&dsi_dev, tp->dmap, tp->len, DMA_TO_DEVICE);
+		tp->dmap = 0;
+	}
+
+	mipi_dsi_enable_irq();
+	mipi_dsi_cmd_dma_add(tp, cmd);
+
+	tp->len += 3;
+	tp->len &= ~0x03;	/* round up to a multiple of 4 */
+
+	tp->dmap = dma_map_single(&dsi_dev, tp->data, tp->len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&dsi_dev, tp->dmap))
+		pr_err("%s: dma mapping failed\n", __func__);
+
+	MIPI_OUTP(MIPI_DSI_BASE + 0x044, tp->dmap);
+	MIPI_OUTP(MIPI_DSI_BASE + 0x048, tp->len);
+	wmb();
+	MIPI_OUTP(MIPI_DSI_BASE + 0x08c, 0x01);	/* trigger */
+	wmb();
+
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	dsi_mdp_busy = FALSE;
+	complete(&dsi_mdp_comp);
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+}
+
 /*
  * mipi_dsi_cmds_tx:
- * ov_mutex need to be acquired before call this function.
+ * thread context only
  */
-int mipi_dsi_cmds_tx(struct msm_fb_data_type *mfd,
-		struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
+int mipi_dsi_cmds_tx(struct dsi_buf *tp, struct dsi_cmd_desc *cmds, int cnt)
 {
 	struct dsi_cmd_desc *cm;
 	uint32 dsi_ctrl, ctrl;
@@ -1115,29 +1208,16 @@
 	if (video_mode) {
 		ctrl = dsi_ctrl | 0x04; /* CMD_MODE_EN */
 		MIPI_OUTP(MIPI_DSI_BASE + 0x0000, ctrl);
-	} else { /* cmd mode */
-		/*
-		 * during boot up, cmd mode is configured
-		 * even it is video mode panel.
-		 */
-		/* make sure mdp dma is not txing pixel data */
-		if (mfd->panel_info.type == MIPI_CMD_PANEL) {
-#ifndef CONFIG_FB_MSM_MDP303
-			mdp4_dsi_cmd_dma_busy_wait(mfd);
-#else
-			mdp3_dsi_cmd_dma_busy_wait(mfd);
-#endif
-		}
 	}
 
 	spin_lock_irqsave(&dsi_mdp_lock, flag);
-	mipi_dsi_enable_irq();
 	dsi_mdp_busy = TRUE;
 	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
 
 	cm = cmds;
 	mipi_dsi_buf_init(tp);
 	for (i = 0; i < cnt; i++) {
+		mipi_dsi_enable_irq();
 		mipi_dsi_buf_init(tp);
 		mipi_dsi_cmd_dma_add(tp, cm);
 		mipi_dsi_cmd_dma_tx(tp);
@@ -1146,15 +1226,14 @@
 		cm++;
 	}
 
-	spin_lock_irqsave(&dsi_mdp_lock, flag);
-	dsi_mdp_busy = FALSE;
-	mipi_dsi_disable_irq();
-	complete(&dsi_mdp_comp);
-	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
-
 	if (video_mode)
 		MIPI_OUTP(MIPI_DSI_BASE + 0x0000, dsi_ctrl); /* restore */
 
+	spin_lock_irqsave(&dsi_mdp_lock, flag);
+	dsi_mdp_busy = FALSE;
+	complete(&dsi_mdp_comp);
+	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
+
 	return cnt;
 }
 
@@ -1183,8 +1262,8 @@
 			struct dsi_cmd_desc *cmds, int rlen)
 {
 	int cnt, len, diff, pkt_size;
-	unsigned long flag;
 	char cmd;
+	unsigned long flag;
 
 	if (mfd->panel_info.mipi.no_max_pkt_size) {
 		/* Only support rlen = 4*n */
@@ -1216,15 +1295,12 @@
 
 	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
 		/* make sure mdp dma is not txing pixel data */
-#ifndef CONFIG_FB_MSM_MDP303
-			mdp4_dsi_cmd_dma_busy_wait(mfd);
-#else
+#ifdef CONFIG_FB_MSM_MDP303
 			mdp3_dsi_cmd_dma_busy_wait(mfd);
 #endif
 	}
 
 	spin_lock_irqsave(&dsi_mdp_lock, flag);
-	mipi_dsi_enable_irq();
 	dsi_mdp_busy = TRUE;
 	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
 
@@ -1232,16 +1308,20 @@
 		/* packet size need to be set at every read */
 		pkt_size = len;
 		max_pktsize[0] = pkt_size;
+		mipi_dsi_enable_irq();
 		mipi_dsi_buf_init(tp);
 		mipi_dsi_cmd_dma_add(tp, pkt_size_cmd);
 		mipi_dsi_cmd_dma_tx(tp);
 	}
 
+	mipi_dsi_enable_irq();
 	mipi_dsi_buf_init(tp);
 	mipi_dsi_cmd_dma_add(tp, cmds);
 
 	/* transmit read command to client */
 	mipi_dsi_cmd_dma_tx(tp);
+
+	mipi_dsi_disable_irq();
 	/*
 	 * once cmd_dma_done interrupt received,
 	 * return data from client is ready and stored
@@ -1260,7 +1340,6 @@
 
 	spin_lock_irqsave(&dsi_mdp_lock, flag);
 	dsi_mdp_busy = FALSE;
-	mipi_dsi_disable_irq();
 	complete(&dsi_mdp_comp);
 	spin_unlock_irqrestore(&dsi_mdp_lock, flag);
 
@@ -1303,7 +1382,6 @@
 
 int mipi_dsi_cmd_dma_tx(struct dsi_buf *tp)
 {
-	int len;
 
 #ifdef DSI_HOST_DEBUG
 	int i;
@@ -1318,25 +1396,24 @@
 	pr_debug("\n");
 #endif
 
-	len = tp->len;
-	len += 3;
-	len &= ~0x03;	/* multipled by 4 */
+	tp->len += 3;
+	tp->len &= ~0x03;	/* round up to a multiple of 4 */
 
-	tp->dmap = dma_map_single(&dsi_dev, tp->data, len, DMA_TO_DEVICE);
+	tp->dmap = dma_map_single(&dsi_dev, tp->data, tp->len, DMA_TO_DEVICE);
 	if (dma_mapping_error(&dsi_dev, tp->dmap))
 		pr_err("%s: dmap mapp failed\n", __func__);
 
 	INIT_COMPLETION(dsi_dma_comp);
 
 	MIPI_OUTP(MIPI_DSI_BASE + 0x044, tp->dmap);
-	MIPI_OUTP(MIPI_DSI_BASE + 0x048, len);
+	MIPI_OUTP(MIPI_DSI_BASE + 0x048, tp->len);
 	wmb();
 	MIPI_OUTP(MIPI_DSI_BASE + 0x08c, 0x01);	/* trigger */
 	wmb();
 
 	wait_for_completion(&dsi_dma_comp);
 
-	dma_unmap_single(&dsi_dev, tp->dmap, len, DMA_TO_DEVICE);
+	dma_unmap_single(&dsi_dev, tp->dmap, tp->len, DMA_TO_DEVICE);
 	tp->dmap = 0;
 	return tp->len;
 }
@@ -1459,7 +1536,6 @@
 #ifdef CONFIG_FB_MSM_MDP40
 	mdp4_stat.intr_dsi++;
 #endif
-
 	if (isr & DSI_INTR_ERROR) {
 		mipi_dsi_mdp_stat_inc(STAT_DSI_ERROR);
 		mipi_dsi_error();
@@ -1474,16 +1550,20 @@
 	if (isr & DSI_INTR_CMD_DMA_DONE) {
 		mipi_dsi_mdp_stat_inc(STAT_DSI_CMD);
 		complete(&dsi_dma_comp);
+		spin_lock(&dsi_mdp_lock);
+		dsi_ctrl_lock = FALSE;
+		mipi_dsi_disable_irq_nosync();
+		spin_unlock(&dsi_mdp_lock);
 	}
 
 	if (isr & DSI_INTR_CMD_MDP_DONE) {
 		mipi_dsi_mdp_stat_inc(STAT_DSI_MDP);
 		spin_lock(&dsi_mdp_lock);
-		dsi_mdp_busy = FALSE;
+		dsi_ctrl_lock = FALSE;
 		mipi_dsi_disable_irq_nosync();
-		spin_unlock(&dsi_mdp_lock);
+		dsi_mdp_busy = FALSE;
 		complete(&dsi_mdp_comp);
-		mipi_dsi_post_kickoff_action();
+		spin_unlock(&dsi_mdp_lock);
 	}
 
 
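
mipi_dsi_clk_cfg() above adds a reference-counted gate around the AHB and DSI
clocks: the first enable turns both on, the last disable turns both off. A short
sketch of balanced use around a command burst; whether a given caller must bracket
mipi_dsi_cmds_tx() this way is an assumption, not shown in this hunk:

/* sketch: refcounted clock gate around a DSI command burst */
static void panel_cmds_with_clocks(struct dsi_buf *tp,
				   struct dsi_cmd_desc *cmds, int cnt)
{
	mipi_dsi_clk_cfg(1);			/* count 0 -> 1: clocks come on */
	mipi_dsi_cmds_tx(tp, cmds, cnt);	/* new mfd-less signature */
	mipi_dsi_clk_cfg(0);			/* count 1 -> 0: clocks go off */
}
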
diff --git a/drivers/video/msm/mipi_novatek.c b/drivers/video/msm/mipi_novatek.c
index 0070757..7dd41d2 100644
--- a/drivers/video/msm/mipi_novatek.c
+++ b/drivers/video/msm/mipi_novatek.c
@@ -243,14 +243,9 @@
 	0x2B, 0x00, 0x00, 0x03, 0xBF}; /* 960 - 1 */
 #endif
 
-static char led_pwm1[2] = {0x51, 0x0};	/* DTYPE_DCS_WRITE1 */
 static char led_pwm2[2] = {0x53, 0x24}; /* DTYPE_DCS_WRITE1 */
 static char led_pwm3[2] = {0x55, 0x00}; /* DTYPE_DCS_WRITE1 */
 
-static struct dsi_cmd_desc novatek_cmd_backlight_cmds[] = {
-	{DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(led_pwm1), led_pwm1},
-};
-
 static struct dsi_cmd_desc novatek_video_on_cmds[] = {
 	{DTYPE_DCS_WRITE, 1, 0, 0, 50,
 		sizeof(sw_reset), sw_reset},
@@ -318,7 +313,7 @@
 	cmd = &novatek_manufacture_id_cmd;
 	mipi_dsi_cmds_rx(mfd, tp, rp, cmd, 3);
 	lp = (uint32 *)rp->data;
-	pr_info("%s: manufacture_id=%x", __func__, *lp);
+	pr_info("%s: manufacture_id=%x\n", __func__, *lp);
 	return *lp;
 }
 
@@ -398,16 +393,18 @@
 
 	mipi  = &mfd->panel_info.mipi;
 
-	if (mipi->mode == DSI_VIDEO_MODE) {
-		mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_video_on_cmds,
-			ARRAY_SIZE(novatek_video_on_cmds));
-	} else {
-		mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_cmd_on_cmds,
-			ARRAY_SIZE(novatek_cmd_on_cmds));
+	if (mipi_dsi_ctrl_lock(0)) {
+		if (mipi->mode == DSI_VIDEO_MODE) {
+			mipi_dsi_cmds_tx(&novatek_tx_buf, novatek_video_on_cmds,
+				ARRAY_SIZE(novatek_video_on_cmds));
+		} else {
+			mipi_dsi_cmds_tx(&novatek_tx_buf, novatek_cmd_on_cmds,
+				ARRAY_SIZE(novatek_cmd_on_cmds));
 
-		mipi_dsi_cmd_bta_sw_trigger(); /* clean up ack_err_status */
-
-		mipi_novatek_manufacture_id(mfd);
+			/* clean up ack_err_status */
+			mipi_dsi_cmd_bta_sw_trigger();
+			mipi_novatek_manufacture_id(mfd);
+		}
 	}
 
 	return 0;
@@ -424,14 +421,23 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_display_off_cmds,
+	if (mipi_dsi_ctrl_lock(0)) {
+		mipi_dsi_cmds_tx(&novatek_tx_buf, novatek_display_off_cmds,
 			ARRAY_SIZE(novatek_display_off_cmds));
+	}
 
 	return 0;
 }
 
 DEFINE_LED_TRIGGER(bkl_led_trigger);
 
+#ifdef CONFIG_FB_MSM_MDP303
+void mdp4_backlight_put_level(int cndx, int level)
+{
+	/* do nothing */
+}
+#endif
+
 static void mipi_novatek_set_backlight(struct msm_fb_data_type *mfd)
 {
 	struct mipi_panel_info *mipi;
@@ -443,21 +449,7 @@
 	}
 	mipi  = &mfd->panel_info.mipi;
 
-	mutex_lock(&mfd->dma->ov_mutex);
-	if (mdp4_overlay_dsi_state_get() <= ST_DSI_SUSPEND) {
-		mutex_unlock(&mfd->dma->ov_mutex);
-		return;
-	}
-	/* mdp4_dsi_cmd_busy_wait: will turn on dsi clock also */
-	mdp4_dsi_cmd_dma_busy_wait(mfd);
-	mdp4_dsi_blt_dmap_busy_wait(mfd);
-	mipi_dsi_mdp_busy_wait(mfd);
-
-	led_pwm1[1] = (unsigned char)(mfd->bl_level);
-	mipi_dsi_cmds_tx(mfd, &novatek_tx_buf, novatek_cmd_backlight_cmds,
-			ARRAY_SIZE(novatek_cmd_backlight_cmds));
-	mutex_unlock(&mfd->dma->ov_mutex);
-	return;
+	mdp4_backlight_put_level(0, mfd->bl_level);
 }
 
 static int mipi_dsi_3d_barrier_sysfs_register(struct device *dev);
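
The novatek hunks above gate every DCS burst behind mipi_dsi_ctrl_lock(0), which
only succeeds when the DSI controller is not already claimed; the lock is released
again from the DMA-done/MDP-done paths in the mipi_dsi_host.c ISR shown earlier.
A sketch of the same gate in a hypothetical panel driver (the buffer and command
arrays are placeholders, not from the patch):

/* sketch: ctrl_lock-gated display-off, mirroring the novatek pattern */
static int example_panel_off(struct msm_fb_data_type *mfd)
{
	if (mfd->key != MFD_KEY)
		return -EINVAL;

	if (mipi_dsi_ctrl_lock(0))		/* skip if controller is busy */
		mipi_dsi_cmds_tx(&example_tx_buf, example_display_off_cmds,
				ARRAY_SIZE(example_display_off_cmds));

	return 0;
}
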
diff --git a/drivers/video/msm/mipi_orise.c b/drivers/video/msm/mipi_orise.c
index 2afbb9b6..d1d6956 100644
--- a/drivers/video/msm/mipi_orise.c
+++ b/drivers/video/msm/mipi_orise.c
@@ -64,10 +64,10 @@
 	mipi  = &mfd->panel_info.mipi;
 
 	if (mipi->mode == DSI_VIDEO_MODE) {
-		mipi_dsi_cmds_tx(mfd, &orise_tx_buf, orise_video_on_cmds,
+		mipi_dsi_cmds_tx(&orise_tx_buf, orise_video_on_cmds,
 			ARRAY_SIZE(orise_video_on_cmds));
 	} else {
-		mipi_dsi_cmds_tx(mfd, &orise_tx_buf, orise_cmd_on_cmds,
+		mipi_dsi_cmds_tx(&orise_tx_buf, orise_cmd_on_cmds,
 			ARRAY_SIZE(orise_cmd_on_cmds));
 
 		mipi_dsi_cmd_bta_sw_trigger(); /* clean up ack_err_status */
@@ -87,7 +87,7 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(mfd, &orise_tx_buf, orise_display_off_cmds,
+	mipi_dsi_cmds_tx(&orise_tx_buf, orise_display_off_cmds,
 			ARRAY_SIZE(orise_display_off_cmds));
 
 	return 0;
diff --git a/drivers/video/msm/mipi_renesas.c b/drivers/video/msm/mipi_renesas.c
index c9dc8255..c842672 100644
--- a/drivers/video/msm/mipi_renesas.c
+++ b/drivers/video/msm/mipi_renesas.c
@@ -1131,23 +1131,23 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_sleep_off_cmds,
+	mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_sleep_off_cmds,
 			ARRAY_SIZE(renesas_sleep_off_cmds));
 
 	mipi_set_tx_power_mode(1);
-	mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_display_on_cmds,
+	mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_display_on_cmds,
 			ARRAY_SIZE(renesas_display_on_cmds));
 
 	if (cpu_is_msm7x25a() || cpu_is_msm7x25aa() || cpu_is_msm7x25ab()) {
-		mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_hvga_on_cmds,
+		mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_hvga_on_cmds,
 			ARRAY_SIZE(renesas_hvga_on_cmds));
 	}
 
 	if (mipi->mode == DSI_VIDEO_MODE)
-		mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_video_on_cmds,
+		mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_video_on_cmds,
 			ARRAY_SIZE(renesas_video_on_cmds));
 	else
-		mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_cmd_on_cmds,
+		mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_cmd_on_cmds,
 			ARRAY_SIZE(renesas_cmd_on_cmds));
 	mipi_set_tx_power_mode(0);
 
@@ -1165,7 +1165,7 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(mfd, &renesas_tx_buf, renesas_display_off_cmds,
+	mipi_dsi_cmds_tx(&renesas_tx_buf, renesas_display_off_cmds,
 			ARRAY_SIZE(renesas_display_off_cmds));
 
 	return 0;
diff --git a/drivers/video/msm/mipi_simulator.c b/drivers/video/msm/mipi_simulator.c
index c6bf534..c751472 100644
--- a/drivers/video/msm/mipi_simulator.c
+++ b/drivers/video/msm/mipi_simulator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -49,7 +49,7 @@
 		 mipi->mode);
 
 	if (mipi->mode == DSI_VIDEO_MODE) {
-		mipi_dsi_cmds_tx(mfd, &simulator_tx_buf, display_on_cmds,
+		mipi_dsi_cmds_tx(&simulator_tx_buf, display_on_cmds,
 			ARRAY_SIZE(display_on_cmds));
 	} else {
 		pr_err("%s:%d, CMD MODE NOT SUPPORTED", __func__, __LINE__);
@@ -75,7 +75,7 @@
 	pr_debug("%s:%d, debug info", __func__, __LINE__);
 
 	if (mipi->mode == DSI_VIDEO_MODE) {
-		mipi_dsi_cmds_tx(mfd, &simulator_tx_buf, display_off_cmds,
+		mipi_dsi_cmds_tx(&simulator_tx_buf, display_off_cmds,
 			ARRAY_SIZE(display_off_cmds));
 	} else {
 		pr_debug("%s:%d, DONT REACH HERE", __func__, __LINE__);
diff --git a/drivers/video/msm/mipi_tc358764_dsi2lvds.c b/drivers/video/msm/mipi_tc358764_dsi2lvds.c
index 2e65c34..1583168 100644
--- a/drivers/video/msm/mipi_tc358764_dsi2lvds.c
+++ b/drivers/video/msm/mipi_tc358764_dsi2lvds.c
@@ -281,8 +281,8 @@
 	payload.addr = reg;
 	payload.data = data;
 
-	/* mutex had been acquired at mipi_dsi_on */
-	mipi_dsi_cmds_tx(mfd, &d2l_tx_buf, &cmd_write_reg, 1);
+	/* mutex had been acquired at dsi_on */
+	mipi_dsi_cmds_tx(&d2l_tx_buf, &cmd_write_reg, 1);
 
 	pr_debug("%s: reg=0x%x. data=0x%x.\n", __func__, reg, data);
 
diff --git a/drivers/video/msm/mipi_toshiba.c b/drivers/video/msm/mipi_toshiba.c
index aeaa5aa..520c67b 100644
--- a/drivers/video/msm/mipi_toshiba.c
+++ b/drivers/video/msm/mipi_toshiba.c
@@ -193,12 +193,12 @@
 		return -EINVAL;
 
 	if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WVGA_PT)
-		mipi_dsi_cmds_tx(mfd, &toshiba_tx_buf,
+		mipi_dsi_cmds_tx(&toshiba_tx_buf,
 			toshiba_wvga_display_on_cmds,
 			ARRAY_SIZE(toshiba_wvga_display_on_cmds));
 	else if (TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WSVGA_PT ||
 		TM_GET_PID(mfd->panel.id) == MIPI_DSI_PANEL_WUXGA)
-		mipi_dsi_cmds_tx(mfd, &toshiba_tx_buf,
+		mipi_dsi_cmds_tx(&toshiba_tx_buf,
 			toshiba_wsvga_display_on_cmds,
 			ARRAY_SIZE(toshiba_wsvga_display_on_cmds));
 	else
@@ -218,7 +218,7 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(mfd, &toshiba_tx_buf, toshiba_display_off_cmds,
+	mipi_dsi_cmds_tx(&toshiba_tx_buf, toshiba_display_off_cmds,
 			ARRAY_SIZE(toshiba_display_off_cmds));
 
 	return 0;
diff --git a/drivers/video/msm/mipi_truly.c b/drivers/video/msm/mipi_truly.c
index a2060f0..fd2a3ea 100644
--- a/drivers/video/msm/mipi_truly.c
+++ b/drivers/video/msm/mipi_truly.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -116,7 +116,7 @@
 		return -EINVAL;
 
 	msleep(20);
-	mipi_dsi_cmds_tx(mfd, &truly_tx_buf, truly_display_on_cmds,
+	mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_on_cmds,
 			ARRAY_SIZE(truly_display_on_cmds));
 
 	return 0;
@@ -133,7 +133,7 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(mfd, &truly_tx_buf, truly_display_off_cmds,
+	mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_off_cmds,
 			ARRAY_SIZE(truly_display_off_cmds));
 
 	return 0;
diff --git a/drivers/video/msm/mipi_truly_tft540960_1_e.c b/drivers/video/msm/mipi_truly_tft540960_1_e.c
index 98b24b1..50db66e 100644
--- a/drivers/video/msm/mipi_truly_tft540960_1_e.c
+++ b/drivers/video/msm/mipi_truly_tft540960_1_e.c
@@ -693,11 +693,11 @@
 	msleep(120);
 
 	if (mipi->mode == DSI_VIDEO_MODE) {
-		mipi_dsi_cmds_tx(mfd, &truly_tx_buf,
+		mipi_dsi_cmds_tx(&truly_tx_buf,
 			truly_video_display_on_cmds,
 			ARRAY_SIZE(truly_video_display_on_cmds));
 	} else if (mipi->mode == DSI_CMD_MODE) {
-		mipi_dsi_cmds_tx(mfd, &truly_tx_buf,
+		mipi_dsi_cmds_tx(&truly_tx_buf,
 			truly_cmd_display_on_cmds,
 			ARRAY_SIZE(truly_cmd_display_on_cmds));
 	}
@@ -716,7 +716,7 @@
 	if (mfd->key != MFD_KEY)
 		return -EINVAL;
 
-	mipi_dsi_cmds_tx(mfd, &truly_tx_buf, truly_display_off_cmds,
+	mipi_dsi_cmds_tx(&truly_tx_buf, truly_display_off_cmds,
 			ARRAY_SIZE(truly_display_off_cmds));
 
 	return 0;
diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c
index 176f56b..72e3600 100644
--- a/drivers/video/msm/msm_fb.c
+++ b/drivers/video/msm/msm_fb.c
@@ -1310,14 +1310,14 @@
 	var->bits_per_pixel = bpp * 8;	/* FrameBuffer color depth */
 	if (mfd->dest == DISPLAY_LCD) {
 		if (panel_info->type == MDDI_PANEL && panel_info->mddi.is_type1)
-			var->reserved[3] = panel_info->lcd.refx100 / (100 * 2);
+			var->reserved[4] = panel_info->lcd.refx100 / (100 * 2);
 		else
-			var->reserved[3] = panel_info->lcd.refx100 / 100;
+			var->reserved[4] = panel_info->lcd.refx100 / 100;
 	} else {
 		if (panel_info->type == MIPI_VIDEO_PANEL) {
-			var->reserved[3] = panel_info->mipi.frame_rate;
+			var->reserved[4] = panel_info->mipi.frame_rate;
 		} else {
-			var->reserved[3] = panel_info->clk_rate /
+			var->reserved[4] = panel_info->clk_rate /
 				((panel_info->lcdc.h_back_porch +
 				  panel_info->lcdc.h_front_porch +
 				  panel_info->lcdc.h_pulse_width +
@@ -1328,7 +1328,7 @@
 				  panel_info->yres));
 		}
 	}
-	pr_debug("reserved[3] %u\n", var->reserved[3]);
+	pr_debug("reserved[4] %u\n", var->reserved[4]);
 
 		/*
 		 * id field for fb app
@@ -1407,6 +1407,14 @@
 	fbi->fix.smem_start = (unsigned long)fbram_phys;
 
 	msm_iommu_map_contig_buffer(fbi->fix.smem_start,
+					DISPLAY_WRITE_DOMAIN,
+					GEN_POOL,
+					fbi->fix.smem_len,
+					SZ_4K,
+					0,
+					&(mfd->display_iova));
+
+	msm_iommu_map_contig_buffer(fbi->fix.smem_start,
 					DISPLAY_READ_DOMAIN,
 					GEN_POOL,
 					fbi->fix.smem_len,
@@ -1684,10 +1692,9 @@
 	struct msm_fb_panel_data *pdata;
 
 	/*
-	 * If framebuffer is 1 or 2, io pen display is not allowed.
+	 * If framebuffer is 2, io pen display is not allowed.
 	 */
-	if (bf_supported &&
-		(info->node == 1 || info->node == 2)) {
+	if (bf_supported && info->node == 2) {
 		pr_err("%s: no pan display for fb%d!",
 		       __func__, info->node);
 		return -EPERM;
@@ -2847,7 +2854,7 @@
 
 static int msmfb_overlay_unset(struct fb_info *info, unsigned long *argp)
 {
-	int	ret, ndx;
+	int ret, ndx;
 
 	ret = copy_from_user(&ndx, argp, sizeof(ndx));
 	if (ret) {
@@ -2859,6 +2866,41 @@
 	return mdp4_overlay_unset(info, ndx);
 }
 
+static int msmfb_overlay_wait4vsync(struct fb_info *info, void __user *argp)
+{
+	int ret;
+	long long vtime;
+
+	ret = mdp4_overlay_wait4vsync(info, &vtime);
+	if (ret) {
+		pr_err("%s: ioctl failed\n", __func__);
+		return ret;
+	}
+
+	if (copy_to_user(argp, &vtime, sizeof(vtime))) {
+		pr_err("%s: copy2user failed\n", __func__);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int msmfb_overlay_vsync_ctrl(struct fb_info *info, void __user *argp)
+{
+	int ret;
+	int enable;
+
+	ret = copy_from_user(&enable, argp, sizeof(enable));
+	if (ret) {
+		pr_err("%s: msmfb_overlay_vsync ioctl failed\n", __func__);
+		return ret;
+	}
+
+	ret = mdp4_overlay_vsync_ctrl(info, enable);
+
+	return ret;
+}
+
 static int msmfb_overlay_play_wait(struct fb_info *info, unsigned long *argp)
 {
 	int ret;
@@ -2965,27 +3007,6 @@
 	return ret;
 }
 
-static int msmfb_overlay_blt_off(struct fb_info *info, unsigned long *argp)
-{
-	int	ret;
-	struct msmfb_overlay_blt req;
-
-	ret = copy_from_user(&req, argp, sizeof(req));
-	if (ret) {
-		pr_err("%s: failed\n", __func__);
-		return ret;
-	}
-
-	ret = mdp4_overlay_blt_offset(info, &req);
-
-	ret = copy_to_user(argp, &req, sizeof(req));
-	if (ret)
-		printk(KERN_ERR "%s:msmfb_overlay_blt_off ioctl failed\n",
-		__func__);
-
-	return ret;
-}
-
 #ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL
 static int msmfb_overlay_ioctl_writeback_init(struct fb_info *info)
 {
@@ -3295,6 +3316,16 @@
 
 	switch (cmd) {
 #ifdef CONFIG_FB_MSM_OVERLAY
+	case FBIO_WAITFORVSYNC:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_overlay_wait4vsync(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
+	case MSMFB_OVERLAY_VSYNC_CTRL:
+		down(&msm_fb_ioctl_ppp_sem);
+		ret = msmfb_overlay_vsync_ctrl(info, argp);
+		up(&msm_fb_ioctl_ppp_sem);
+		break;
 	case MSMFB_OVERLAY_GET:
 		down(&msm_fb_ioctl_ppp_sem);
 		ret = msmfb_overlay_get(info, argp);
@@ -3330,11 +3361,6 @@
 		ret = msmfb_overlay_blt(info, argp);
 		up(&msm_fb_ioctl_ppp_sem);
 		break;
-	case MSMFB_OVERLAY_BLT_OFFSET:
-		down(&msm_fb_ioctl_ppp_sem);
-		ret = msmfb_overlay_blt_off(info, argp);
-		up(&msm_fb_ioctl_ppp_sem);
-		break;
 	case MSMFB_OVERLAY_3D:
 		down(&msm_fb_ioctl_ppp_sem);
 		ret = msmfb_overlay_3d_sbys(info, argp);
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_api.c b/drivers/video/msm/vidc/common/vcd/vcd_api.c
index c66c2b7..0dbbf57 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_api.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_api.c
@@ -13,6 +13,7 @@
 
 #include <linux/export.h>
 #include <media/msm/vidc_type.h>
+#include <media/msm/vidc_init.h>
 #include "vcd.h"
 
 u32 vcd_init(struct vcd_init_config *config, s32 *driver_handle)
@@ -157,7 +158,7 @@
 		       void *handle, void *const client_data),
 	void *client_data, int flags)
 {
-	u32 rc = 0;
+	u32 rc = 0, num_of_instances = 0;
 	struct vcd_drv_ctxt *drv_ctxt;
 	struct vcd_clnt_ctxt *cctxt;
 	int is_secure = (flags & VCD_CP_SESSION) ? 1 : 0;
@@ -167,6 +168,17 @@
 		VCD_MSG_ERROR("Bad parameters");
 		return -EINVAL;
 	}
+
+	drv_ctxt = vcd_get_drv_context();
+	cctxt = drv_ctxt->dev_ctxt.cctxt_list_head;
+	while (cctxt) {
+		num_of_instances++;
+		cctxt = cctxt->next;
+	}
+	if (num_of_instances == VIDC_MAX_NUM_CLIENTS) {
+		pr_err("%s(): Max number of clients reached\n", __func__);
+		return -ENODEV;
+	}
 	rc = is_session_invalid(decoding, flags);
 	if (rc) {
 		VCD_MSG_ERROR("Invalid Session: is_decoder: %d, secure: %d\n",
@@ -175,7 +187,6 @@
 	}
 	if (is_secure)
 		res_trk_secure_set();
-	drv_ctxt = vcd_get_drv_context();
 	mutex_lock(&drv_ctxt->dev_mutex);
 
 	if (drv_ctxt->dev_state.state_table->ev_hdlr.open) {
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_core.h b/drivers/video/msm/vidc/common/vcd/vcd_core.h
index 79bcac0f..8126a0e 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_core.h
+++ b/drivers/video/msm/vidc/common/vcd/vcd_core.h
@@ -25,7 +25,7 @@
 
 #define VCD_MIN_PERF_LEVEL                   37900
 
-#define VCD_DRIVER_INSTANCE_MAX              4
+#define VCD_DRIVER_CLIENTS_MAX              6
 
 #define VCD_MAX_CLIENT_TRANSACTIONS          32
 
@@ -126,7 +126,7 @@
 
 	struct vcd_init_config config;
 
-	u32 driver_ids[VCD_DRIVER_INSTANCE_MAX];
+	u32 driver_ids[VCD_DRIVER_CLIENTS_MAX];
 	u32 refs;
 	u8 *device_base_addr;
 	void *hw_timer_handle;
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
index 96e729de..53495e0 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_device_sm.c
@@ -537,12 +537,12 @@
 	*driver_handle = 0;
 
 	driver_id = 0;
-	while (driver_id < VCD_DRIVER_INSTANCE_MAX &&
+	while (driver_id < VCD_DRIVER_CLIENTS_MAX &&
 		   dev_ctxt->driver_ids[driver_id]) {
 		++driver_id;
 	}
 
-	if (driver_id == VCD_DRIVER_INSTANCE_MAX) {
+	if (driver_id == VCD_DRIVER_CLIENTS_MAX) {
 		VCD_MSG_ERROR("Max driver instances reached");
 
 		return VCD_ERR_FAIL;
diff --git a/drivers/video/msm/vidc/common/vcd/vcd_sub.c b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
index 5b64f20..c11ac301 100644
--- a/drivers/video/msm/vidc/common/vcd/vcd_sub.c
+++ b/drivers/video/msm/vidc/common/vcd/vcd_sub.c
@@ -1228,7 +1228,7 @@
 	driver_handle--;
 
 	if (driver_handle < 0 ||
-		driver_handle >= VCD_DRIVER_INSTANCE_MAX ||
+		driver_handle >= VCD_DRIVER_CLIENTS_MAX ||
 		!dev_ctxt->driver_ids[driver_handle]) {
 		return false;
 	} else {
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 64c3b31..aa24919 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -124,7 +124,7 @@
 	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
 	seq_putc(p, '\n');
 
-	for_each_online_cpu(i) {
+	for_each_present_cpu(i) {
 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
 		user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
 		nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b2b79b1..277fdf1 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -259,6 +259,7 @@
 header-y += msdos_fs.h
 header-y += msg.h
 header-y += msm_adc.h
+header-y += msm_ion.h
 header-y += epm_adc.h
 header-y += mtio.h
 header-y += n_r3964.h
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index f03a493..b6064a4 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -47,53 +47,107 @@
 enum coresight_dev_type {
 	CORESIGHT_DEV_TYPE_SINK,
 	CORESIGHT_DEV_TYPE_LINK,
+	CORESIGHT_DEV_TYPE_LINKSINK,
 	CORESIGHT_DEV_TYPE_SOURCE,
-	CORESIGHT_DEV_TYPE_MAX,
+};
+
+enum coresight_dev_subtype_sink {
+	CORESIGHT_DEV_SUBTYPE_SINK_NONE,
+	CORESIGHT_DEV_SUBTYPE_SINK_PORT,
+	CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+};
+
+enum coresight_dev_subtype_link {
+	CORESIGHT_DEV_SUBTYPE_LINK_NONE,
+	CORESIGHT_DEV_SUBTYPE_LINK_MERG,
+	CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
+	CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
+};
+
+enum coresight_dev_subtype_source {
+	CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
+	CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
+	CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
+	CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+};
+
+struct coresight_dev_subtype {
+	enum coresight_dev_subtype_sink sink_subtype;
+	enum coresight_dev_subtype_link link_subtype;
+	enum coresight_dev_subtype_source source_subtype;
+};
+
+struct coresight_platform_data {
+	int id;
+	const char *name;
+	int nr_inports;
+	const int *outports;
+	const int *child_ids;
+	const int *child_ports;
+	int nr_outports;
+	bool default_sink;
+};
+
+struct coresight_desc {
+	enum coresight_dev_type type;
+	struct coresight_dev_subtype subtype;
+	const struct coresight_ops *ops;
+	struct coresight_platform_data *pdata;
+	struct device *dev;
+	const struct attribute_group **groups;
+	struct module *owner;
 };
 
 struct coresight_connection {
+	int outport;
 	int child_id;
 	int child_port;
 	struct coresight_device *child_dev;
 	struct list_head link;
 };
 
+struct coresight_refcnt {
+	int sink_refcnt;
+	int *link_refcnts;
+	int source_refcnt;
+};
+
 struct coresight_device {
 	int id;
 	struct coresight_connection *conns;
 	int nr_conns;
+	enum coresight_dev_type type;
+	struct coresight_dev_subtype subtype;
 	const struct coresight_ops *ops;
 	struct device dev;
-	struct mutex mutex;
-	int *refcnt;
-	struct list_head link;
+	struct coresight_refcnt refcnt;
+	struct list_head dev_link;
+	struct list_head path_link;
 	struct module *owner;
 	bool enable;
 };
 
 #define to_coresight_device(d) container_of(d, struct coresight_device, dev)
 
+struct coresight_ops_sink {
+	int (*enable)(struct coresight_device *csdev);
+	void (*disable)(struct coresight_device *csdev);
+};
+
+struct coresight_ops_link {
+	int (*enable)(struct coresight_device *csdev, int iport, int oport);
+	void (*disable)(struct coresight_device *csdev, int iport, int oport);
+};
+
+struct coresight_ops_source {
+	int (*enable)(struct coresight_device *csdev);
+	void (*disable)(struct coresight_device *csdev);
+};
+
 struct coresight_ops {
-	int (*enable)(struct coresight_device *csdev, int port);
-	void (*disable)(struct coresight_device *csdev, int port);
-};
-
-struct coresight_platform_data {
-	int id;
-	const char *name;
-	int nr_ports;
-	int *child_ids;
-	int *child_ports;
-	int nr_children;
-};
-
-struct coresight_desc {
-	enum coresight_dev_type type;
-	const struct coresight_ops *ops;
-	struct coresight_platform_data *pdata;
-	struct device *dev;
-	const struct attribute_group **groups;
-	struct module *owner;
+	const struct coresight_ops_sink *sink_ops;
+	const struct coresight_ops_link *link_ops;
+	const struct coresight_ops_source *source_ops;
 };
 
 struct qdss_source {
@@ -109,24 +163,29 @@
 };
 
 
-extern struct coresight_device *
-coresight_register(struct coresight_desc *desc);
-extern void coresight_unregister(struct coresight_device *csdev);
-extern int coresight_enable(struct coresight_device *csdev, int port);
-extern void coresight_disable(struct coresight_device *csdev, int port);
-
 #ifdef CONFIG_MSM_QDSS
 extern struct qdss_source *qdss_get(const char *name);
 extern void qdss_put(struct qdss_source *src);
 extern int qdss_enable(struct qdss_source *src);
 extern void qdss_disable(struct qdss_source *src);
 extern void qdss_disable_sink(void);
+extern struct coresight_device *
+coresight_register(struct coresight_desc *desc);
+extern void coresight_unregister(struct coresight_device *csdev);
+extern int coresight_enable(struct coresight_device *csdev);
+extern void coresight_disable(struct coresight_device *csdev);
 #else
 static inline struct qdss_source *qdss_get(const char *name) { return NULL; }
 static inline void qdss_put(struct qdss_source *src) {}
 static inline int qdss_enable(struct qdss_source *src) { return -ENOSYS; }
 static inline void qdss_disable(struct qdss_source *src) {}
 static inline void qdss_disable_sink(void) {}
+static inline struct coresight_device *
+coresight_register(struct coresight_desc *desc) { return NULL; }
+static inline void coresight_unregister(struct coresight_device *csdev) {}
+static inline int
+coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
+static inline void coresight_disable(struct coresight_device *csdev) {}
 #endif
 
 #endif
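
The reworked CoreSight API above splits the enable/disable callbacks by device class and moves registration data into struct coresight_desc. Below is a minimal sketch of how a sink driver might register against it; the my_sink_* names, the buffer-sink subtype choice and the probe wiring are illustrative assumptions, not part of this patch.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/coresight.h>

static int my_sink_enable(struct coresight_device *csdev)
{
	/* Program the sink hardware to start capturing trace data. */
	return 0;
}

static void my_sink_disable(struct coresight_device *csdev)
{
	/* Stop capture and flush the sink. */
}

static const struct coresight_ops_sink my_sink_ops = {
	.enable		= my_sink_enable,
	.disable	= my_sink_disable,
};

static const struct coresight_ops my_cs_ops = {
	.sink_ops	= &my_sink_ops,
};

static int my_sink_probe(struct platform_device *pdev)
{
	struct coresight_desc desc = {
		.type	 = CORESIGHT_DEV_TYPE_SINK,
		.subtype = { .sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER },
		.ops	 = &my_cs_ops,
		.pdata	 = pdev->dev.platform_data,
		.dev	 = &pdev->dev,
		.owner	 = THIS_MODULE,
	};
	struct coresight_device *csdev = coresight_register(&desc);

	/* The header does not spell out the failure convention; treating a
	 * NULL return as an error is an assumption of this sketch. */
	return csdev ? 0 : -EINVAL;
}
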
diff --git a/include/linux/fb.h b/include/linux/fb.h
index d31cb68..f6a2923 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -279,7 +279,7 @@
 	__u32 vmode;			/* see FB_VMODE_*		*/
 	__u32 rotate;			/* angle we rotate counter clockwise */
 	__u32 colorspace;		/* colorspace for FOURCC-based modes */
-	__u32 reserved[4];		/* Reserved for future compatibility */
+	__u32 reserved[5];		/* Reserved for future compatibility */
 };
 
 struct fb_cmap {
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index b836824..338c891 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -121,6 +121,7 @@
 	unsigned int		sg_len;		/* size of scatter list */
 	struct scatterlist	*sg;		/* I/O scatter list */
 	s32			host_cookie;	/* host private data */
+	bool			fault_injected; /* fault injected */
 };
 
 struct mmc_request {
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
new file mode 100644
index 0000000..0e28e54
--- /dev/null
+++ b/include/linux/msm_ion.h
@@ -0,0 +1,22 @@
+/*
+ * include/linux/msm_ion.h
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_MSM_ION_H
+#define _LINUX_MSM_ION_H
+
+#include <linux/ion.h>
+
+#endif
diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h
index d8edbc8..4c42623 100644
--- a/include/linux/msm_mdp.h
+++ b/include/linux/msm_mdp.h
@@ -32,8 +32,11 @@
 #define MSMFB_OVERLAY_SET       _IOWR(MSMFB_IOCTL_MAGIC, 135, \
 						struct mdp_overlay)
 #define MSMFB_OVERLAY_UNSET     _IOW(MSMFB_IOCTL_MAGIC, 136, unsigned int)
+
 #define MSMFB_OVERLAY_PLAY      _IOW(MSMFB_IOCTL_MAGIC, 137, \
 						struct msmfb_overlay_data)
+#define MSMFB_OVERLAY_QUEUE	MSMFB_OVERLAY_PLAY
+
 #define MSMFB_GET_PAGE_PROTECTION _IOR(MSMFB_IOCTL_MAGIC, 138, \
 					struct mdp_page_protection)
 #define MSMFB_SET_PAGE_PROTECTION _IOW(MSMFB_IOCTL_MAGIC, 139, \
@@ -66,6 +69,9 @@
 #define MSMFB_WRITEBACK_TERMINATE _IO(MSMFB_IOCTL_MAGIC, 155)
 #define MSMFB_MDP_PP _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp)
 
+#define MSMFB_OVERLAY_VSYNC_CTRL  _IOW(MSMFB_IOCTL_MAGIC, 160, unsigned int)
+
+
 #define FB_TYPE_3D_PANEL 0x10101010
 #define MDP_IMGTYPE2_START 0x10000
 #define MSMFB_DRIVER_VERSION	0xF9E8D701
@@ -148,7 +154,7 @@
 #define MDP_DEINTERLACE_ODD		0x00400000
 #define MDP_OV_PLAY_NOWAIT		0x00200000
 #define MDP_SOURCE_ROTATED_90		0x00100000
-#define MDP_DPP_HSIC			0x00080000
+#define MDP_OVERLAY_PP_CFG_EN		0x00080000
 #define MDP_BACKEND_COMPOSITION		0x00040000
 #define MDP_BORDERFILL_SUPPORTED	0x00010000
 #define MDP_SECURE_OVERLAY_SESSION      0x00008000
@@ -265,15 +271,47 @@
 	struct msmfb_img img;
 };
 
-struct dpp_ctrl {
-	/*
-	 *'sharp_strength' has inputs = -128 <-> 127
-	 *  Increasingly positive values correlate with increasingly sharper
-	 *  picture. Increasingly negative values correlate with increasingly
-	 *  smoothed picture.
-	 */
-	int8_t sharp_strength;
-	int8_t hsic_params[NUM_HSIC_PARAM];
+#define MDP_PP_OPS_READ 0x2
+#define MDP_PP_OPS_WRITE 0x4
+
+struct mdp_qseed_cfg {
+	uint32_t table_num;
+	uint32_t ops;
+	uint32_t len;
+	uint32_t *data;
+};
+
+struct mdp_qseed_cfg_data {
+	uint32_t block;
+	struct mdp_qseed_cfg qseed_data;
+};
+
+#define MDP_OVERLAY_PP_CSC_CFG      0x1
+#define MDP_OVERLAY_PP_QSEED_CFG    0x2
+
+#define MDP_CSC_FLAG_ENABLE	0x1
+#define MDP_CSC_FLAG_YUV_IN	0x2
+#define MDP_CSC_FLAG_YUV_OUT	0x4
+
+struct mdp_csc_cfg {
+	/* flags for enable CSC, toggling RGB,YUV input/output */
+	uint32_t flags;
+	uint32_t csc_mv[9];
+	uint32_t csc_pre_bv[3];
+	uint32_t csc_post_bv[3];
+	uint32_t csc_pre_lv[6];
+	uint32_t csc_post_lv[6];
+};
+
+struct mdp_csc_cfg_data {
+	uint32_t block;
+	struct mdp_csc_cfg csc_data;
+};
+
+struct mdp_overlay_pp_params {
+	uint32_t config_ops;
+	struct mdp_csc_cfg csc_cfg;
+	struct mdp_qseed_cfg qseed_cfg[2];
 };
 
 struct mdp_overlay {
@@ -287,7 +325,7 @@
 	uint32_t flags;
 	uint32_t id;
 	uint32_t user_data[8];
-	struct dpp_ctrl dpp;
+	struct mdp_overlay_pp_params overlay_pp_cfg;
 };
 
 struct msmfb_overlay_3d {
@@ -375,25 +413,6 @@
 	struct mdp_pcc_coeff r, g, b;
 };
 
-#define MDP_CSC_FLAG_ENABLE	0x1
-#define MDP_CSC_FLAG_YUV_IN	0x2
-#define MDP_CSC_FLAG_YUV_OUT	0x4
-
-struct mdp_csc_cfg {
-	/* flags for enable CSC, toggling RGB,YUV input/output */
-	uint32_t flags;
-	uint32_t csc_mv[9];
-	uint32_t csc_pre_bv[3];
-	uint32_t csc_post_bv[3];
-	uint32_t csc_pre_lv[6];
-	uint32_t csc_post_lv[6];
-};
-
-struct mdp_csc_cfg_data {
-	uint32_t block;
-	struct mdp_csc_cfg csc_data;
-};
-
 enum {
 	mdp_lut_igc,
 	mdp_lut_pgc,
@@ -401,7 +420,6 @@
 	mdp_lut_max,
 };
 
-
 struct mdp_igc_lut_data {
 	uint32_t block;
 	uint32_t len, ops;
@@ -443,14 +461,6 @@
 	} data;
 };
 
-struct mdp_qseed_cfg_data {
-	uint32_t block;
-	uint32_t table_num;
-	uint32_t ops;
-	uint32_t len;
-	uint32_t *data;
-};
-
 struct mdp_bl_scale_data {
 	uint32_t min_lvl;
 	uint32_t scale;
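
With dpp_ctrl replaced by mdp_overlay_pp_params, per-overlay post-processing is now requested by setting MDP_OVERLAY_PP_CFG_EN in mdp_overlay.flags and filling overlay_pp_cfg. A hedged userspace sketch follows; the identity-matrix values and their fixed-point format are assumptions, and the remaining mdp_overlay fields (src, dst, z_order, ...) are expected to be filled elsewhere.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp.h>

static int overlay_enable_csc(int fb_fd, struct mdp_overlay *ov)
{
	/* Identity matrix in an assumed S4.9 fixed-point format. */
	static const uint32_t identity_mv[9] = {
		0x200, 0, 0,
		0, 0x200, 0,
		0, 0, 0x200,
	};

	ov->flags |= MDP_OVERLAY_PP_CFG_EN;
	ov->overlay_pp_cfg.config_ops = MDP_OVERLAY_PP_CSC_CFG;
	ov->overlay_pp_cfg.csc_cfg.flags = MDP_CSC_FLAG_ENABLE;
	memcpy(ov->overlay_pp_cfg.csc_cfg.csc_mv, identity_mv,
	       sizeof(identity_mv));

	return ioctl(fb_fd, MSMFB_OVERLAY_SET, ov);
}
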
diff --git a/include/linux/msm_thermal.h b/include/linux/msm_thermal.h
index fe9be89..47a8753 100644
--- a/include/linux/msm_thermal.h
+++ b/include/linux/msm_thermal.h
@@ -17,9 +17,9 @@
 struct msm_thermal_data {
 	uint32_t sensor_id;
 	uint32_t poll_ms;
-	uint32_t limit_temp;
-	uint32_t temp_hysteresis;
-	uint32_t limit_freq;
+	uint32_t limit_temp_degC;
+	uint32_t temp_hysteresis_degC;
+	uint32_t freq_step;
 };
 
 #ifdef CONFIG_THERMAL_MONITOR
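
The rename appears to shift the meaning of the last field from an absolute frequency limit to a step count, so existing board files need more than a mechanical rename. An illustrative initializer with the new field names is shown below; the numeric values are placeholders, not taken from this patch.

static struct msm_thermal_data msm_thermal_pdata = {
	.sensor_id		= 0,
	.poll_ms		= 250,
	.limit_temp_degC	= 60,
	.temp_hysteresis_degC	= 10,
	.freq_step		= 2,	/* drop two cpufreq levels per trip (assumed meaning) */
};
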
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
new file mode 100644
index 0000000..33559dd
--- /dev/null
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -0,0 +1,689 @@
+/*
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PMIC QPNP ADC driver header file
+ *
+ */
+
+#ifndef __QPNP_ADC_H
+#define __QPNP_ADC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+/**
+ * enum qpnp_vadc_channels - QPNP AMUX arbiter channels
+ */
+enum qpnp_vadc_channels {
+	USBIN = 0,
+	DCIN,
+	VCHG_SNS,
+	SPARE1_03,
+	SPARE2_03,
+	VCOIN,
+	VBAT_SNS,
+	VSYS,
+	DIE_TEMP,
+	REF_625MV,
+	REF_125V,
+	CHG_TEMP,
+	SPARE1,
+	SPARE2,
+	GND_REF,
+	VDD_VADC,
+	P_MUX1_1_1,
+	P_MUX2_1_1,
+	P_MUX3_1_1,
+	P_MUX4_1_1,
+	P_MUX5_1_1,
+	P_MUX6_1_1,
+	P_MUX7_1_1,
+	P_MUX8_1_1,
+	P_MUX9_1_1,
+	P_MUX10_1_1,
+	P_MUX11_1_1,
+	P_MUX12_1_1,
+	P_MUX13_1_1,
+	P_MUX14_1_1,
+	P_MUX15_1_1,
+	P_MUX16_1_1,
+	P_MUX1_1_3,
+	P_MUX2_1_3,
+	P_MUX3_1_3,
+	P_MUX4_1_3,
+	P_MUX5_1_3,
+	P_MUX6_1_3,
+	P_MUX7_1_3,
+	P_MUX8_1_3,
+	P_MUX9_1_3,
+	P_MUX10_1_3,
+	P_MUX11_1_3,
+	P_MUX12_1_3,
+	P_MUX13_1_3,
+	P_MUX14_1_3,
+	P_MUX15_1_3,
+	P_MUX16_1_3,
+	LR_MUX1_BATT_THERM,
+	LR_MUX2_BAT_ID,
+	LR_MUX3_XO_THERM,
+	LR_MUX4_AMUX_THM1,
+	LR_MUX5_AMUX_THM2,
+	LR_MUX6_AMUX_THM3,
+	LR_MUX7_HW_ID,
+	LR_MUX8_AMUX_THM4,
+	LR_MUX9_AMUX_THM5,
+	LR_MUX10_USB_ID,
+	AMUX_PU1,
+	AMUX_PU2,
+	LR_MUX3_BUF_XO_THERM_BUF,
+	LR_MUX1_PU1_BAT_THERM,
+	LR_MUX2_PU1_BAT_ID,
+	LR_MUX3_PU1_XO_THERM,
+	LR_MUX4_PU1_AMUX_THM1,
+	LR_MUX5_PU1_AMUX_THM2,
+	LR_MUX6_PU1_AMUX_THM3,
+	LR_MUX7_PU1_AMUX_HW_ID,
+	LR_MUX8_PU1_AMUX_THM4,
+	LR_MUX9_PU1_AMUX_THM5,
+	LR_MUX10_PU1_AMUX_USB_ID,
+	LR_MUX3_BUF_PU1_XO_THERM_BUF,
+	LR_MUX1_PU2_BAT_THERM,
+	LR_MUX2_PU2_BAT_ID,
+	LR_MUX3_PU2_XO_THERM,
+	LR_MUX4_PU2_AMUX_THM1,
+	LR_MUX5_PU2_AMUX_THM2,
+	LR_MUX6_PU2_AMUX_THM3,
+	LR_MUX7_PU2_AMUX_HW_ID,
+	LR_MUX8_PU2_AMUX_THM4,
+	LR_MUX9_PU2_AMUX_THM5,
+	LR_MUX10_PU2_AMUX_USB_ID,
+	LR_MUX3_BUF_PU2_XO_THERM_BUF,
+	LR_MUX1_PU1_PU2_BAT_THERM,
+	LR_MUX2_PU1_PU2_BAT_ID,
+	LR_MUX3_PU1_PU2_XO_THERM,
+	LR_MUX4_PU1_PU2_AMUX_THM1,
+	LR_MUX5_PU1_PU2_AMUX_THM2,
+	LR_MUX6_PU1_PU2_AMUX_THM3,
+	LR_MUX7_PU1_PU2_AMUX_HW_ID,
+	LR_MUX8_PU1_PU2_AMUX_THM4,
+	LR_MUX9_PU1_PU2_AMUX_THM5,
+	LR_MUX10_PU1_PU2_AMUX_USB_ID,
+	LR_MUX3_BUF_PU1_PU2_XO_THERM_BUF,
+	ALL_OFF,
+	ADC_MAX_NUM,
+};
+
+#define QPNP_ADC_625_UV	625000
+
+/**
+ * enum qpnp_adc_decimation_type - Sampling rate supported.
+ * %DECIMATION_TYPE1: 512
+ * %DECIMATION_TYPE2: 1K
+ * %DECIMATION_TYPE3: 2K
+ * %DECIMATION_TYPE4: 4K
+ * %DECIMATION_NONE: Do not use this sampling type.
+ *
+ * The sampling rate is specific to each channel of the QPNP ADC arbiter.
+ */
+enum qpnp_adc_decimation_type {
+	DECIMATION_TYPE1 = 0,
+	DECIMATION_TYPE2,
+	DECIMATION_TYPE3,
+	DECIMATION_TYPE4,
+	DECIMATION_NONE,
+};
+
+/**
+ * enum qpnp_adc_calib_type - QPNP ADC Calibration type.
+ * %CALIB_ABSOLUTE: Use 625mV and 1.25V reference channels.
+ * %CALIB_RATIOMETRIC: Use reference Voltage/GND.
+ * %CALIB_NONE: Do not use this calibration type.
+ *
+ * Use the input reference voltage depending on the calibration type
+ * to calculate the offset and gain parameters. The calibration is
+ * specific to each channel of the QPNP ADC.
+ */
+enum qpnp_adc_calib_type {
+	CALIB_ABSOLUTE = 0,
+	CALIB_RATIOMETRIC,
+	CALIB_NONE,
+};
+
+/**
+ * enum qpnp_adc_channel_scaling_param - pre-scaling AMUX ratio.
+ * %PATH_SCALING1: ratio of {1, 1}
+ * %PATH_SCALING2: ratio of {1, 3}
+ * %PATH_SCALING3: ratio of {1, 4}
+ * %PATH_SCALING4: ratio of {1, 6}
+ * %PATH_SCALING_NONE: Do not use this pre-scaling ratio type.
+ *
+ * The pre-scaling is applied for signals to be within the voltage range
+ * of the ADC.
+ */
+enum qpnp_adc_channel_scaling_param {
+	PATH_SCALING1 = 0,
+	PATH_SCALING2,
+	PATH_SCALING3,
+	PATH_SCALING4,
+	PATH_SCALING_NONE,
+};
+
+/**
+ * enum qpnp_adc_scale_fn_type - Scaling function for QPNP pre-calibrated
+ *				   digital data relative to the ADC reference.
+ * %SCALE_DEFAULT: Default scaling to convert raw adc code to voltage.
+ * %SCALE_BATT_THERM: Conversion to temperature based on btm parameters.
+ * %SCALE_PA_THERM: Returns PA thermistor temperature in degrees Centigrade.
+ * %SCALE_PMIC_THERM: Returns result in milli degrees Centigrade.
+ * %SCALE_XOTHERM: Returns XO thermistor temperature in degrees Centigrade.
+ * %SCALE_NONE: Do not use this scaling type.
+ */
+enum qpnp_adc_scale_fn_type {
+	SCALE_DEFAULT = 0,
+	SCALE_BATT_THERM,
+	SCALE_PA_THERM,
+	SCALE_PMIC_THERM,
+	SCALE_XOTHERM,
+	SCALE_NONE,
+};
+
+/**
+ * enum qpnp_adc_fast_avg_ctl - Provides ability to obtain single result
+ *		from the ADC that is an average of multiple measurement
+ *		samples. Select number of samples for use in fast
+ *		average mode (i.e. 2 ^ value).
+ * %ADC_FAST_AVG_SAMPLE_1:   0x0 = 1
+ * %ADC_FAST_AVG_SAMPLE_2:   0x1 = 2
+ * %ADC_FAST_AVG_SAMPLE_4:   0x2 = 4
+ * %ADC_FAST_AVG_SAMPLE_8:   0x3 = 8
+ * %ADC_FAST_AVG_SAMPLE_16:  0x4 = 16
+ * %ADC_FAST_AVG_SAMPLE_32:  0x5 = 32
+ * %ADC_FAST_AVG_SAMPLE_64:  0x6 = 64
+ * %ADC_FAST_AVG_SAMPLE_128: 0x7 = 128
+ * %ADC_FAST_AVG_SAMPLE_256: 0x8 = 256
+ * %ADC_FAST_AVG_SAMPLE_512: 0x9 = 512
+ */
+enum qpnp_adc_fast_avg_ctl {
+	ADC_FAST_AVG_SAMPLE_1 = 0,
+	ADC_FAST_AVG_SAMPLE_2,
+	ADC_FAST_AVG_SAMPLE_4,
+	ADC_FAST_AVG_SAMPLE_8,
+	ADC_FAST_AVG_SAMPLE_16,
+	ADC_FAST_AVG_SAMPLE_32,
+	ADC_FAST_AVG_SAMPLE_64,
+	ADC_FAST_AVG_SAMPLE_128,
+	ADC_FAST_AVG_SAMPLE_256,
+	ADC_FAST_AVG_SAMPLE_512,
+	ADC_FAST_AVG_SAMPLE_NONE,
+};
+
+/**
+ * enum qpnp_adc_hw_settle_time - Time between AMUX getting configured and
+ *		the ADC starting conversion. Delay = 100us * value for
+ *		value < 11 and 2ms * (value - 10) otherwise.
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_0US:   0us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_100US: 100us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_200US: 200us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_300US: 300us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_400US: 400us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_500US: 500us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_600US: 600us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_700US: 700us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_800US: 800us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_900US: 900us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_1MS:   1ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_2MS:   2ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_4MS:   4ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_6MS:   6ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_8MS:   8ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_10MS:  10ms
+ * %ADC_CHANNEL_HW_SETTLE_NONE
+ */
+enum qpnp_adc_hw_settle_time {
+	ADC_CHANNEL_HW_SETTLE_DELAY_0US = 0,
+	ADC_CHANNEL_HW_SETTLE_DELAY_100US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_200US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_300US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_400US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_500US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_600US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_700US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_800US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_900US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_1MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_2MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_4MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_6MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_8MS,
+	ADC_CHANNEL_HW_SETTLE_DELAY_10MS,
+	ADC_CHANNEL_HW_SETTLE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_mode_sel - Selects the basic mode of operation.
+ *		- The normal mode is used for single measurement.
+ *		- The Conversion sequencer is used to trigger an
+ *		  ADC read when a HW trigger is selected.
+ *		- The measurement interval performs a single or
+ *		  continuous measurement at a specified interval/delay.
+ * %ADC_OP_NORMAL_MODE : Normal mode used for single measurement.
+ * %ADC_OP_CONVERSION_SEQUENCER : Conversion sequencer used to trigger
+ *		  an ADC read on a HW supported trigger.
+ *		  Refer to enum qpnp_vadc_trigger for
+ *		  supported HW triggers.
+ * %ADC_OP_MEASUREMENT_INTERVAL : The measurement interval performs a
+ *		  single or continuous measurement after a specified delay.
+ *		  For delay look at qpnp_adc_meas_timer.
+ */
+enum qpnp_vadc_mode_sel {
+	ADC_OP_NORMAL_MODE = 0,
+	ADC_OP_CONVERSION_SEQUENCER,
+	ADC_OP_MEASUREMENT_INTERVAL,
+	ADC_OP_MODE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_trigger - Select the HW trigger to be used while
+ *		measuring the ADC reading.
+ * %ADC_GSM_PA_ON : GSM power amplifier on.
+ * %ADC_TX_GTR_THRES : Transmit power greater than threshold.
+ * %ADC_CAMERA_FLASH_RAMP : Flash ramp up done.
+ * %ADC_DTEST : DTEST.
+ */
+enum qpnp_vadc_trigger {
+	ADC_GSM_PA_ON = 0,
+	ADC_TX_GTR_THRES,
+	ADC_CAMERA_FLASH_RAMP,
+	ADC_DTEST,
+	ADC_SEQ_NONE,
+};
+
+/**
+ * enum qpnp_vadc_conv_seq_timeout - Selects the hold-off delay (0ms to 15ms)
+ *		between the conversion request and the conversion
+ *		sequencer trigger.
+ */
+enum qpnp_vadc_conv_seq_timeout {
+	ADC_CONV_SEQ_TIMEOUT_0MS = 0,
+	ADC_CONV_SEQ_TIMEOUT_1MS,
+	ADC_CONV_SEQ_TIMEOUT_2MS,
+	ADC_CONV_SEQ_TIMEOUT_3MS,
+	ADC_CONV_SEQ_TIMEOUT_4MS,
+	ADC_CONV_SEQ_TIMEOUT_5MS,
+	ADC_CONV_SEQ_TIMEOUT_6MS,
+	ADC_CONV_SEQ_TIMEOUT_7MS,
+	ADC_CONV_SEQ_TIMEOUT_8MS,
+	ADC_CONV_SEQ_TIMEOUT_9MS,
+	ADC_CONV_SEQ_TIMEOUT_10MS,
+	ADC_CONV_SEQ_TIMEOUT_11MS,
+	ADC_CONV_SEQ_TIMEOUT_12MS,
+	ADC_CONV_SEQ_TIMEOUT_13MS,
+	ADC_CONV_SEQ_TIMEOUT_14MS,
+	ADC_CONV_SEQ_TIMEOUT_15MS,
+	ADC_CONV_SEQ_TIMEOUT_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_holdoff - Select delay from conversion
+ *		trigger signal (i.e. adc_conv_seq_trig) transition
+ *		to ADC enable. Delay = 25us * (value + 1).
+ */
+enum qpnp_adc_conv_seq_holdoff {
+	ADC_SEQ_HOLD_25US = 0,
+	ADC_SEQ_HOLD_50US,
+	ADC_SEQ_HOLD_75US,
+	ADC_SEQ_HOLD_100US,
+	ADC_SEQ_HOLD_125US,
+	ADC_SEQ_HOLD_150US,
+	ADC_SEQ_HOLD_175US,
+	ADC_SEQ_HOLD_200US,
+	ADC_SEQ_HOLD_225US,
+	ADC_SEQ_HOLD_250US,
+	ADC_SEQ_HOLD_275US,
+	ADC_SEQ_HOLD_300US,
+	ADC_SEQ_HOLD_325US,
+	ADC_SEQ_HOLD_350US,
+	ADC_SEQ_HOLD_375US,
+	ADC_SEQ_HOLD_400US,
+	ADC_SEQ_HOLD_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_state - Conversion sequencer operating state
+ * %ADC_CONV_SEQ_IDLE : Sequencer is in idle.
+ * %ADC_CONV_TRIG_RISE : Waiting for rising edge trigger.
+ * %ADC_CONV_TRIG_HOLDOFF : Waiting for rising trigger hold off time.
+ * %ADC_CONV_MEAS_RISE : Measuring selected ADC signal.
+ * %ADC_CONV_TRIG_FALL : Waiting for falling trigger edge.
+ * %ADC_CONV_FALL_HOLDOFF : Waiting for falling trigger hold off time.
+ * %ADC_CONV_MEAS_FALL : Measuring selected ADC signal.
+ * %ADC_CONV_ERROR : Hardware error condition.
+ */
+enum qpnp_adc_conv_seq_state {
+	ADC_CONV_SEQ_IDLE = 0,
+	ADC_CONV_TRIG_RISE,
+	ADC_CONV_TRIG_HOLDOFF,
+	ADC_CONV_MEAS_RISE,
+	ADC_CONV_TRIG_FALL,
+	ADC_CONV_FALL_HOLDOFF,
+	ADC_CONV_MEAS_FALL,
+	ADC_CONV_ERROR,
+	ADC_CONV_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer - Selects the measurement interval time.
+ *		If value = 0 the interval is 0ms, else it is 2^(value + 4) / 32768
+ *		seconds (e.g. value = 3 gives 128/32768 s, i.e. ~3.9ms).
+ * %ADC_MEAS_INTERVAL_0MS : 0ms
+ * %ADC_MEAS_INTERVAL_1P0MS : 1ms
+ * %ADC_MEAS_INTERVAL_2P0MS : 2ms
+ * %ADC_MEAS_INTERVAL_3P9MS : 3.9ms
+ * %ADC_MEAS_INTERVAL_7P8MS : 7.8ms
+ * %ADC_MEAS_INTERVAL_15P6MS : 15.6ms
+ * %ADC_MEAS_INTERVAL_31P3MS : 31.3ms
+ * %ADC_MEAS_INTERVAL_62P5MS : 62.5ms
+ * %ADC_MEAS_INTERVAL_125MS : 125ms
+ * %ADC_MEAS_INTERVAL_250MS : 250ms
+ * %ADC_MEAS_INTERVAL_500MS : 500ms
+ * %ADC_MEAS_INTERVAL_1S : 1 second
+ * %ADC_MEAS_INTERVAL_2S : 2 seconds
+ * %ADC_MEAS_INTERVAL_4S : 4 seconds
+ * %ADC_MEAS_INTERVAL_8S : 8 seconds
+ * %ADC_MEAS_INTERVAL_16S: 16 seconds
+ */
+enum qpnp_adc_meas_timer {
+	ADC_MEAS_INTERVAL_0MS = 0,
+	ADC_MEAS_INTERVAL_1P0MS,
+	ADC_MEAS_INTERVAL_2P0MS,
+	ADC_MEAS_INTERVAL_3P9MS,
+	ADC_MEAS_INTERVAL_7P8MS,
+	ADC_MEAS_INTERVAL_15P6MS,
+	ADC_MEAS_INTERVAL_31P3MS,
+	ADC_MEAS_INTERVAL_62P5MS,
+	ADC_MEAS_INTERVAL_125MS,
+	ADC_MEAS_INTERVAL_250MS,
+	ADC_MEAS_INTERVAL_500MS,
+	ADC_MEAS_INTERVAL_1S,
+	ADC_MEAS_INTERVAL_2S,
+	ADC_MEAS_INTERVAL_4S,
+	ADC_MEAS_INTERVAL_8S,
+	ADC_MEAS_INTERVAL_16S,
+	ADC_MEAS_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_interval_op_ctl - Select operating mode.
+ * %ADC_MEAS_INTERVAL_OP_SINGLE : Conduct single measurement at specified time
+ *			delay.
+ * %ADC_MEAS_INTERVAL_OP_CONTINUOUS : Make measurements at measurement interval
+ *			times.
+ */
+enum qpnp_adc_meas_interval_op_ctl {
+	ADC_MEAS_INTERVAL_OP_SINGLE = 0,
+	ADC_MEAS_INTERVAL_OP_CONTINUOUS,
+	ADC_MEAS_INTERVAL_OP_NONE,
+};
+
+/**
+ * struct qpnp_vadc_linear_graph - Represent ADC characteristics.
+ * @dy: Numerator slope to calculate the gain.
+ * @dx: Denominator slope to calculate the gain.
+ * @adc_vref: A/D word of the voltage reference used for the channel.
+ * @adc_gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are computed
+ * to calibrate the device.
+ */
+struct qpnp_vadc_linear_graph {
+	int64_t dy;
+	int64_t dx;
+	int64_t adc_vref;
+	int64_t adc_gnd;
+};
+
+/**
+ * struct qpnp_vadc_map_pt - Map the graph representation for ADC channel
+ * @x: Represent the ADC digitized code.
+ * @y: Represent the physical data which can be temperature, voltage,
+ *     resistance.
+ */
+struct qpnp_vadc_map_pt {
+	int32_t x;
+	int32_t y;
+};
+
+/**
+ * struct qpnp_vadc_scaling_ratio - Represent scaling ratio for adc input.
+ * @num: Numerator scaling parameter.
+ * @den: Denominator scaling parameter.
+ */
+struct qpnp_vadc_scaling_ratio {
+	int32_t num;
+	int32_t den;
+};
+
+/**
+ * struct qpnp_adc_properties - Represent the ADC properties.
+ * @adc_vdd_reference: Reference voltage for QPNP ADC.
+ * @bitresolution: ADC bit resolution for QPNP ADC.
+ * @bipolar: Polarity for QPNP ADC.
+ */
+struct qpnp_adc_properties {
+	uint32_t	adc_vdd_reference;
+	uint32_t	bitresolution;
+	bool		bipolar;
+};
+
+/**
+ * struct qpnp_vadc_chan_properties - Represent channel properties of the ADC.
+ * @offset_gain_numerator: The inverse numerator of the gain applied to the
+ *			   input channel.
+ * @offset_gain_denominator: The inverse denominator of the gain applied to the
+ *			     input channel.
+ * @adc_graph: ADC graph for the channel of struct type qpnp_vadc_linear_graph.
+ */
+struct qpnp_vadc_chan_properties {
+	uint32_t			offset_gain_numerator;
+	uint32_t			offset_gain_denominator;
+	struct qpnp_vadc_linear_graph	adc_graph[2];
+};
+
+/**
+ * struct qpnp_vadc_result - Represent the result of the QPNP ADC.
+ * @chan: The channel number of the requested conversion.
+ * @adc_code: The pre-calibrated digital output of a given ADC relative to
+ *	      the ADC reference.
+ * @measurement: In units specific to a given ADC; most ADCs use a reference
+ *		 voltage but some use a reference current. This measurement
+ *		 is a number relative to the reference of a given ADC.
+ * @physical: The data meaningful for each individual channel whether it is
+ *	      voltage, current, temperature, etc.
+ *	      - All voltage units are represented in microvolts.
+ *	      - Battery temperature units are represented as 0.1 DegC.
+ *	      - PA Therm temperature units are represented as DegC.
+ *	      - PMIC Die temperature units are represented as 0.001 DegC.
+ */
+struct qpnp_vadc_result {
+	uint32_t	chan;
+	int32_t		adc_code;
+	int64_t		measurement;
+	int64_t		physical;
+};
+
+/**
+ * struct qpnp_vadc_amux - AMUX properties for individual channel
+ * @name: Channel string name.
+ * @channel_num: Channel number from enum qpnp_vadc_channels.
+ * @chan_path_prescaling: Channel scaling performed on the input signal.
+ * @adc_decimation: Sampling rate desired for the channel.
+ * @adc_scale_fn: Scaling function to convert to the data meaningful for
+ *		 each individual channel, whether it is voltage, current,
+ *		 temperature, etc., and compensates for the channel properties.
+ * @fast_avg_setup: Number of samples averaged to produce a single result.
+ * @hw_settle_time: Delay between AMUX configuration and the start of conversion.
+ */
+struct qpnp_vadc_amux {
+	char					*name;
+	enum qpnp_vadc_channels			channel_num;
+	enum qpnp_adc_channel_scaling_param	chan_path_prescaling;
+	enum qpnp_adc_decimation_type		adc_decimation;
+	enum qpnp_adc_scale_fn_type		adc_scale_fn;
+	enum qpnp_adc_fast_avg_ctl		fast_avg_setup;
+	enum qpnp_adc_hw_settle_time		hw_settle_time;
+};
+
+/*
+ * AMUX pre-division ratios, indexed by the channel pre-scaling
+ * selection, used to compensate the measured signal.
+ */
+static const struct qpnp_vadc_scaling_ratio qpnp_vadc_amux_scaling_ratio[] = {
+	{1, 1},
+	{1, 3},
+	{1, 4},
+	{1, 6},
+	{1, 20}
+};
+
+/**
+ * struct qpnp_vadc_scale_fn - Scaling function prototype
+ * @chan: Function pointer to one of the scaling functions
+ *	which takes the adc properties, channel properties,
+ *	and returns the physical result
+ */
+struct qpnp_vadc_scale_fn {
+	int32_t (*chan) (int32_t,
+		const struct qpnp_adc_properties *,
+		const struct qpnp_vadc_chan_properties *,
+		struct qpnp_vadc_result *);
+};
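
The per-channel adc_scale_fn selector is typically resolved through a table of struct qpnp_vadc_scale_fn entries. A hedged sketch using the one scaling routine this header exports; the table itself and any additional scaling routines are assumptions, not declared by this patch.

static const struct qpnp_vadc_scale_fn vadc_scale_fn[] = {
	[SCALE_DEFAULT] = { qpnp_adc_scale_default },
	/* Entries for SCALE_BATT_THERM, SCALE_PA_THERM, etc. would point at
	 * their own scaling routines, which this header does not declare. */
};
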
+
+/**
+ * struct qpnp_adc_drv - QPNP ADC device structure.
+ * @spmi - spmi device for ADC peripheral.
+ * @slave - SPMI slave ID of the ADC peripheral.
+ * @offset - base offset for the ADC peripheral.
+ * @adc_prop - ADC properties specific to the ADC peripheral.
+ * @amux_prop - AMUX properties representing the ADC peripheral.
+ * @adc_channels - ADC channel properties for the ADC peripheral.
+ * @adc_irq - IRQ number that is mapped to the ADC peripheral.
+ * @adc_lock - ADC lock for access to the peripheral.
+ * @adc_rslt_completion - ADC result notification after interrupt
+ *			  is received.
+ */
+struct qpnp_adc_drv {
+	struct spmi_device		*spmi;
+	uint8_t				slave;
+	uint16_t			offset;
+	struct qpnp_adc_properties	*adc_prop;
+	struct qpnp_vadc_amux_properties	*amux_prop;
+	struct qpnp_vadc_amux		*adc_channels;
+	int				adc_irq;
+	struct mutex			adc_lock;
+	struct completion		adc_rslt_completion;
+};
+
+/**
+ * struct qpnp_vadc_amux_properties - QPNP VADC amux channel property.
+ * @amux_channel - Refer to the qpnp_vadc_channel list.
+ * @decimation - Sampling rate supported for the channel.
+ * @mode_sel - The basic mode of operation.
+ * @hw_settle_time - The time between AMUX being configured and the
+ *			start of conversion.
+ * @fast_avg_setup - Ability to provide single result from the ADC
+ *			that is an average of multiple measurements.
+ * @trigger_channel - HW trigger channel for conversion sequencer.
+ * @chan_prop - Represent the channel properties of the ADC.
+ */
+struct qpnp_vadc_amux_properties {
+	uint32_t			amux_channel;
+	uint32_t			decimation;
+	uint32_t			mode_sel;
+	uint32_t			hw_settle_time;
+	uint32_t			fast_avg_setup;
+	enum qpnp_vadc_trigger		trigger_channel;
+	struct qpnp_vadc_chan_properties	chan_prop[0];
+};
+
+/* Public API */
+#if defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE)				\
+			|| defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE_MODULE)
+/**
+ * qpnp_vadc_read() - Performs ADC read on the channel.
+ * @channel:	Input channel to perform the ADC read.
+ * @result:	Structure pointer of type qpnp_vadc_result
+ *		in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_read(enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_vadc_conv_seq_request() - Performs ADC read on the conversion
+ *				sequencer channel.
+ * @trigger_channel:	HW trigger for the conversion sequencer, from
+ *		enum qpnp_vadc_trigger.
+ * @channel:	Input channel to perform the ADC read.
+ * @result:	Structure pointer of type qpnp_vadc_result
+ *		in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_conv_seq_request(
+			enum qpnp_vadc_trigger trigger_channel,
+			enum qpnp_vadc_channels channel,
+			struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_vadc_check_result() - Performs check on the ADC raw code.
+ * @data:	Data used for verifying the range of the ADC code.
+ */
+int32_t qpnp_vadc_check_result(int32_t *data);
+
+/**
+ * qpnp_adc_get_devicetree_data() - Abstracts the ADC devicetree data.
+ * @spmi:	spmi ADC device.
+ * @adc_qpnp:	ADC driver structure to be populated from the device tree.
+ */
+int32_t qpnp_adc_get_devicetree_data(struct spmi_device *spmi,
+					struct qpnp_adc_drv *adc_qpnp);
+
+/**
+ * qpnp_vadc_configure() - Configure ADC device to start conversion.
+ * @chan_prop:	Individual channel properties for the AMUX channel.
+ */
+int32_t qpnp_vadc_configure(
+			struct qpnp_vadc_amux_properties *chan_prop);
+
+/**
+ * qpnp_adc_scale_default() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset.
+ * @adc_code:	pre-calibrated digital output of the ADC.
+ * @adc_prop:	adc properties of the qpnp adc such as bit resolution,
+ *		reference voltage.
+ * @chan_prop:	Individual channel properties to compensate the i/p scaling,
+ *		slope and offset.
+ * @chan_rslt:	Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_default(int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt);
+#else
+static inline int32_t qpnp_vadc_read(enum qpnp_vadc_channels channel,
+				struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_conv_seq_request(
+			enum qpnp_vadc_trigger trigger_channel,
+			enum qpnp_vadc_channels channel,
+			struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_default(int32_t adc_code,
+			const struct qpnp_adc_properties *adc_prop,
+			const struct qpnp_vadc_chan_properties *chan_prop,
+			struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+#endif
+
+#endif
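
For a client of the public API declared above, a read reduces to passing a channel from enum qpnp_vadc_channels and consuming the physical field of the result. A minimal sketch, assuming the documented convention that voltage channels report @physical in microvolts:

#include <linux/kernel.h>
#include <linux/qpnp/qpnp-adc.h>

static int read_vbat_uv(int64_t *uv)
{
	struct qpnp_vadc_result result;
	int32_t rc;

	rc = qpnp_vadc_read(VBAT_SNS, &result);
	if (rc) {
		pr_err("VBAT_SNS read failed: %d\n", rc);
		return rc;
	}

	*uv = result.physical;	/* microvolts per the kernel-doc above */
	return 0;
}
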
diff --git a/include/linux/usb/android.h b/include/linux/usb/android.h
index 7c2b33b..0b11fdaf 100644
--- a/include/linux/usb/android.h
+++ b/include/linux/usb/android.h
@@ -17,6 +17,8 @@
 #ifndef	__LINUX_USB_ANDROID_H
 #define	__LINUX_USB_ANDROID_H
 
+#include <linux/usb/composite.h>
+
 struct android_usb_platform_data {
 	int (*update_pid_and_serial_num)(uint32_t, const char *);
 	u32 swfi_latency;
@@ -24,4 +26,22 @@
 	bool cdrom;
 };
 
+#ifndef CONFIG_TARGET_CORE
+static inline int f_tcm_init(int (*connect_cb)(bool connect))
+{
+	/*
+	 * Fail bind() not init(). If a function init() returns error
+	 * android composite registration would fail.
+	 */
+	return 0;
+}
+static inline void f_tcm_exit(void)
+{
+}
+static inline int tcm_bind_config(struct usb_configuration *c)
+{
+	return -ENODEV;
+}
+#endif
+
 #endif	/* __LINUX_USB_ANDROID_H */
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 76f4396..72f5c96 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -56,7 +56,7 @@
 	struct sk_buff_head	rxq_pause;
 	struct urb		*interrupt;
 	struct usb_anchor	deferred;
-	struct tasklet_struct	bh;
+	struct work_struct	bh_w;
 
 	struct work_struct	kevent;
 	unsigned long		flags;
diff --git a/include/linux/wcnss_wlan.h b/include/linux/wcnss_wlan.h
index d7e65b0..46a5b1b 100644
--- a/include/linux/wcnss_wlan.h
+++ b/include/linux/wcnss_wlan.h
@@ -47,6 +47,7 @@
 int req_riva_power_on_lock(char *driver_name);
 int free_riva_power_on_lock(char *driver_name);
 unsigned int wcnss_get_serial_number(void);
+void wcnss_flush_delayed_boot_votes(void);
 #define wcnss_wlan_get_drvdata(dev) dev_get_drvdata(dev)
 #define wcnss_wlan_set_drvdata(dev, data) dev_set_drvdata((dev), (data))
 
diff --git a/include/media/msm_camera.h b/include/media/msm_camera.h
index ae81dcd..e4003759 100644
--- a/include/media/msm_camera.h
+++ b/include/media/msm_camera.h
@@ -213,6 +213,10 @@
 #define MSM_CAM_IOCTL_GET_INST_HANDLE \
 	_IOR(MSM_CAM_IOCTL_MAGIC, 60, uint32_t *)
 
+#define MSM_CAM_IOCTL_STATS_UNREG_BUF \
+	_IOR(MSM_CAM_IOCTL_MAGIC, 61, struct msm_stats_flush_bufq *)
+
+
 struct msm_stats_reqbuf {
 	int num_buf;		/* how many buffers requested */
 	int stats_type;	/* stats type */
@@ -468,6 +472,7 @@
 #define CMD_AXI_CFG_ZSL 43
 #define CMD_AXI_CFG_SNAP_VPE 44
 #define CMD_AXI_CFG_SNAP_THUMB_VPE 45
+
 #define CMD_CONFIG_PING_ADDR 46
 #define CMD_CONFIG_PONG_ADDR 47
 #define CMD_CONFIG_FREE_BUF_ADDR 48
@@ -475,6 +480,13 @@
 #define CMD_AXI_CFG_VIDEO_ALL_CHNLS 50
 #define CMD_VFE_BUFFER_RELEASE 51
 #define CMD_VFE_PROCESS_IRQ 52
+#define CMD_STATS_BG_ENABLE 53
+#define CMD_STATS_BF_ENABLE 54
+#define CMD_STATS_BHIST_ENABLE 55
+#define CMD_STATS_BG_BUF_RELEASE 56
+#define CMD_STATS_BF_BUF_RELEASE 57
+#define CMD_STATS_BHIST_BUF_RELEASE 58
+
 
 #define CMD_AXI_CFG_PRIM               BIT(8)
 #define CMD_AXI_CFG_PRIM_ALL_CHNLS     BIT(9)
@@ -524,7 +536,10 @@
 #define MSM_PMEM_C2D			17
 #define MSM_PMEM_MAINIMG_VPE    18
 #define MSM_PMEM_THUMBNAIL_VPE  19
-#define MSM_PMEM_MAX            20
+#define MSM_PMEM_BAYER_GRID		20
+#define MSM_PMEM_BAYER_FOCUS	21
+#define MSM_PMEM_BAYER_HIST		22
+#define MSM_PMEM_MAX            23
 
 #define STAT_AEAW			0
 #define STAT_AEC			1
@@ -534,7 +549,10 @@
 #define STAT_CS				5
 #define STAT_IHIST			6
 #define STAT_SKIN			7
-#define STAT_MAX			8
+#define STAT_BG				8
+#define STAT_BF				9
+#define STAT_BHIST			10
+#define STAT_MAX			11
 
 #define FRAME_PREVIEW_OUTPUT1		0
 #define FRAME_PREVIEW_OUTPUT2		1
@@ -1235,27 +1253,31 @@
 #define CSI_DECODE_10BIT 2
 #define CSI_DECODE_DPCM_10_8_10 5
 
-#define ISPIF_STREAM(intf, action) (((intf)<<ISPIF_S_STREAM_SHIFT)+(action))
-#define ISPIF_ON_FRAME_BOUNDARY	(0x01 << 0)
-#define ISPIF_OFF_FRAME_BOUNDARY    (0x01 << 1)
-#define ISPIF_OFF_IMMEDIATELY       (0x01 << 2)
-#define ISPIF_S_STREAM_SHIFT	4
-
+#define ISPIF_STREAM(intf, action, vfe) (((intf)<<ISPIF_S_STREAM_SHIFT)+\
+	(action)+((vfe)<<ISPIF_VFE_INTF_SHIFT))
+#define ISPIF_ON_FRAME_BOUNDARY   (0x01 << 0)
+#define ISPIF_OFF_FRAME_BOUNDARY  (0x01 << 1)
+#define ISPIF_OFF_IMMEDIATELY     (0x01 << 2)
+#define ISPIF_S_STREAM_SHIFT      4
+#define ISPIF_VFE_INTF_SHIFT      12
 
 #define PIX_0 (0x01 << 0)
 #define RDI_0 (0x01 << 1)
 #define PIX_1 (0x01 << 2)
 #define RDI_1 (0x01 << 3)
-#define PIX_2 (0x01 << 4)
-#define RDI_2 (0x01 << 5)
+#define RDI_2 (0x01 << 4)
 
+enum msm_ispif_vfe_intf {
+	VFE0,
+	VFE1,
+	VFE_MAX,
+};
 
 enum msm_ispif_intftype {
 	PIX0,
 	RDI0,
 	PIX1,
 	RDI1,
-	PIX2,
 	RDI2,
 	INTF_MAX,
 };
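
ISPIF_STREAM() now takes a third argument selecting the VFE interface, encoded at ISPIF_VFE_INTF_SHIFT alongside the interface type and the action bits. Two illustrative encodings, not taken from this patch:

/* Start PIX0 on VFE0 at the next frame boundary. */
uint32_t start_cmd = ISPIF_STREAM(PIX0, ISPIF_ON_FRAME_BOUNDARY, VFE0);

/* Stop RDI1 on VFE1 immediately. */
uint32_t stop_cmd = ISPIF_STREAM(RDI1, ISPIF_OFF_IMMEDIATELY, VFE1);
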
@@ -1290,6 +1312,7 @@
 	uint8_t intftype;
 	uint16_t cid_mask;
 	uint8_t csid;
+	uint8_t vfe_intf;
 };
 
 struct msm_ispif_params_list {
@@ -1655,6 +1678,7 @@
 	GESTURE_DEV,
 	IRQ_ROUTER_DEV,
 	CPP_DEV,
+	CCI_DEV,
 };
 
 struct msm_mctl_set_sdev_data {
@@ -1858,6 +1882,12 @@
 	struct msm_cpp_frame_strip_info *strip_info;
 };
 
+struct msm_ver_num_info {
+	uint32_t main;
+	uint32_t minor;
+	uint32_t rev;
+};
+
 #define VIDIOC_MSM_CPP_CFG \
 	_IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_camera_v4l2_ioctl_t)
 
diff --git a/include/media/msm_isp.h b/include/media/msm_isp.h
index 3df6ded..ab9e70c 100644
--- a/include/media/msm_isp.h
+++ b/include/media/msm_isp.h
@@ -62,6 +62,10 @@
 #define MSG_ID_OUTPUT_TERTIARY1         43
 #define MSG_ID_STOP_LS_ACK              44
 #define MSG_ID_OUTPUT_TERTIARY2         45
+#define MSG_ID_STATS_BG                 46
+#define MSG_ID_STATS_BF                 47
+#define MSG_ID_STATS_BHIST              48
+
 
 /* ISP command IDs */
 #define VFE_CMD_DUMMY_0                                 0
@@ -206,6 +210,14 @@
 #define VFE_CMD_STATS_REQBUF                            139
 #define VFE_CMD_STATS_ENQUEUEBUF                        140
 #define VFE_CMD_STATS_FLUSH_BUFQ                        141
+#define VFE_CMD_STATS_UNREGBUF                          142
+#define VFE_CMD_STATS_BG_START                          143
+#define VFE_CMD_STATS_BG_STOP                           144
+#define VFE_CMD_STATS_BF_START                          145
+#define VFE_CMD_STATS_BF_STOP                           146
+#define VFE_CMD_STATS_BHIST_START                       147
+#define VFE_CMD_STATS_BHIST_STOP                        148
+#define VFE_CMD_RESET_2                                 149
 
 struct msm_isp_cmd {
 	int32_t  id;
diff --git a/include/media/vcap_fmt.h b/include/media/vcap_fmt.h
index 92240bf1..00e0375 100644
--- a/include/media/vcap_fmt.h
+++ b/include/media/vcap_fmt.h
@@ -44,6 +44,43 @@
 	HAL_VCAP_RGB,
 };
 
+enum nr_mode {
+	NR_DISABLE = 0,
+	NR_AUTO,
+	NR_MANUAL,
+};
+
+enum nr_decay_ratio {
+	NR_Decay_Ratio_26 = 0,
+	NR_Decay_Ratio_25,
+	NR_Decay_Ratio_24,
+	NR_Decay_Ratio_23,
+	NR_Decay_Ratio_22,
+	NR_Decay_Ratio_21,
+	NR_Decay_Ratio_20,
+	NR_Decay_Ratio_19,
+};
+
+struct nr_config {
+	uint8_t max_blend_ratio;
+	uint8_t scale_diff_ratio;
+	uint8_t diff_limit_ratio;
+	uint8_t scale_motion_ratio;
+	uint8_t blend_limit_ratio;
+};
+
+struct nr_param {
+	enum nr_mode mode;
+	enum nr_decay_ratio decay_ratio;
+	uint8_t window;
+	struct nr_config luma;
+	struct nr_config chroma;
+};
+
+#define VCAPIOC_NR_S_PARAMS _IOWR('V', (BASE_VIDIOC_PRIVATE+0), struct nr_param)
+
+#define VCAPIOC_NR_G_PARAMS _IOWR('V', (BASE_VIDIOC_PRIVATE+1), struct nr_param)
+
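
The new VCAPIOC_NR_G_PARAMS/VCAPIOC_NR_S_PARAMS pair lets userspace read-modify-write the noise reduction setup through one struct nr_param. A hedged sketch follows; it assumes the header is exported to userspace, and the manual-mode values chosen are arbitrary.

#include <sys/ioctl.h>
#include <media/vcap_fmt.h>

static int vcap_set_manual_nr(int vcap_fd)
{
	struct nr_param param;
	int rc;

	rc = ioctl(vcap_fd, VCAPIOC_NR_G_PARAMS, &param);
	if (rc < 0)
		return rc;

	param.mode = NR_MANUAL;
	param.decay_ratio = NR_Decay_Ratio_23;	/* arbitrary illustrative choice */

	return ioctl(vcap_fd, VCAPIOC_NR_S_PARAMS, &param);
}
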
 struct v4l2_format_vc_ext {
 	enum hal_vcap_mode     mode;
 	enum hal_vcap_polar    h_polar;
diff --git a/include/media/vcap_v4l2.h b/include/media/vcap_v4l2.h
index 9719aa6..390a843 100644
--- a/include/media/vcap_v4l2.h
+++ b/include/media/vcap_v4l2.h
@@ -120,7 +120,8 @@
 
 	void					*bufMotion;
 	struct nr_buffer		bufNR;
-	bool					nr_enabled;
+	struct nr_param			nr_param;
+	bool					nr_update;
 };
 
 struct vp_work_t {
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index 8e8778a..ac4ec09 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -573,6 +573,7 @@
 
 /*  Payload of the #ADM_CMD_CONNECT_AFE_PORT_V5 command.*/
 struct adm_cmd_connect_afe_port_v5 {
+	struct apr_hdr     hdr;
 	u8                  mode;
 /* ID of the stream router (RX/TX). Use the
  * ADM_STRTR_ID_RX or ADM_STRTR_IDX macros
diff --git a/include/sound/q6afe-v2.h b/include/sound/q6afe-v2.h
index 1587d38..b1ed9a4 100644
--- a/include/sound/q6afe-v2.h
+++ b/include/sound/q6afe-v2.h
@@ -77,6 +77,7 @@
 int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain);
 int afe_loopback_gain(u16 port_id, u16 volume);
 int afe_validate_port(u16 port_id);
+int afe_get_port_index(u16 port_id);
 int afe_start_pseudo_port(u16 port_id);
 int afe_stop_pseudo_port(u16 port_id);
 int afe_cmd_memory_map(u32 dma_addr_p, u32 dma_buf_sz);
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index d0ec4f3..1967297 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -58,7 +58,7 @@
 	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
 		msg, raw_smp_processor_id(),
 		current->comm, task_pid_nr(current));
-	printk(KERN_EMERG " lock: %ps, .magic: %08x, .owner: %s/%d, "
+	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
 			".owner_cpu: %d\n",
 		lock, lock->magic,
 		owner ? owner->comm : "<none>",
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c34cbbd..831509c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5649,7 +5649,12 @@
 __alloc_contig_migrate_alloc(struct page *page, unsigned long private,
 			     int **resultp)
 {
-	return alloc_page(GFP_HIGHUSER_MOVABLE);
+	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+
+	if (PageHighMem(page))
+		gfp_mask |= __GFP_HIGHMEM;
+
+	return alloc_page(gfp_mask);
 }
 
 /* [start, end) must belong to a single zone. */
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 02ea082..0563af9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -951,6 +951,9 @@
 	if (test_bit(HCI_RAW, &hdev->flags))
 		return;
 
+	if (conn->type == LE_LINK)
+		return;
+
 	if (conn->mode != HCI_CM_SNIFF)
 		goto timer;
 
@@ -1018,6 +1021,9 @@
 	if (test_bit(HCI_RAW, &hdev->flags))
 		return;
 
+	if (conn->type == LE_LINK)
+		return;
+
 	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
 		return;
 
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index da8b2dc..56bdd46 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2124,6 +2124,55 @@
 EXPORT_SYMBOL(hci_send_sco);
 
 /* ---- HCI TX task (outgoing data) ---- */
+/* HCI ACL Connection scheduler */
+static inline struct hci_conn *hci_low_sent_acl(struct hci_dev *hdev,
+								int *quote)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn *conn = NULL;
+	int num = 0, min = ~0, conn_num = 0;
+	struct list_head *p;
+
+	/* We don't have to lock device here. Connections are always
+	 * added and removed with TX task disabled. */
+	list_for_each(p, &h->list) {
+		struct hci_conn *c;
+		c = list_entry(p, struct hci_conn, list);
+		if (c->type == ACL_LINK)
+			conn_num++;
+
+		if (skb_queue_empty(&c->data_q))
+			continue;
+
+		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
+			continue;
+
+		num++;
+
+		if (c->sent < min) {
+			min  = c->sent;
+			conn = c;
+		}
+	}
+
+	if (conn) {
+		int cnt, q;
+		cnt = hdev->acl_cnt;
+		q = cnt / num;
+		*quote = q ? q : 1;
+	} else
+		*quote = 0;
+
+	if ((*quote == hdev->acl_cnt) &&
+		(conn->sent == (hdev->acl_pkts - 1)) &&
+		(conn_num > 1)) {
+			*quote = 0;
+			conn = NULL;
+	}
+
+	BT_DBG("conn %p quote %d", conn, *quote);
+	return conn;
+}
 
 /* HCI Connection scheduler */
 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
@@ -2217,8 +2266,10 @@
 	}
 
 	while (hdev->acl_cnt > 0 &&
-		(conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
-		while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
+		((conn = hci_low_sent_acl(hdev, &quote)) != NULL)) {
+
+		while (quote > 0 &&
+			  (skb = skb_dequeue(&conn->data_q))) {
 			int count = 1;
 
 			BT_DBG("skb %p len %d", skb, skb->len);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 1390eca..dc4bf2f 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -138,6 +138,21 @@
 
 static inline void rfcomm_session_put(struct rfcomm_session *s)
 {
+	bool match = false;
+	struct rfcomm_session *sess;
+	struct list_head *p, *n;
+	list_for_each_safe(p, n, &session_list) {
+		sess = list_entry(p, struct rfcomm_session, list);
+		if (s == sess) {
+			match = true;
+			break;
+		}
+	}
+	if (!match) {
+		BT_ERR("session already freed previously");
+		dump_stack();
+		return;
+	}
 	if (atomic_dec_and_test(&s->refcnt))
 		rfcomm_session_del(s);
 }
diff --git a/sound/soc/codecs/wcd9304.c b/sound/soc/codecs/wcd9304.c
index bbf1b89..f4f55fa 100644
--- a/sound/soc/codecs/wcd9304.c
+++ b/sound/soc/codecs/wcd9304.c
@@ -33,6 +33,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/kernel.h>
 #include <linux/gpio.h>
+#include <linux/wait.h>
 #include "wcd9304.h"
 
 #define WCD9304_RATES (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|\
@@ -55,12 +56,15 @@
 #define AIF1_PB 1
 #define AIF1_CAP 2
 #define NUM_CODEC_DAIS 2
+#define SLIM_CLOSE_TIMEOUT 1000
 
 struct sitar_codec_dai_data {
 	u32 rate;
 	u32 *ch_num;
 	u32 ch_act;
 	u32 ch_tot;
+	u32 ch_mask;
+	wait_queue_head_t dai_wait;
 };
 
 #define SITAR_MCLK_RATE_12288KHZ 12288000
@@ -2833,13 +2837,48 @@
 	},
 };
 
+static int sitar_codec_enable_chmask(struct sitar_priv *sitar,
+	int event, int index)
+{
+	int ret = 0;
+	u32 k = 0;
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		for (k = 0; k < sitar->dai[index].ch_tot; k++) {
+			ret = wcd9xxx_get_slave_port(
+						sitar->dai[index].ch_num[k]);
+			if (ret < 0) {
+				pr_err("%s: Invalid Slave port ID: %d\n",
+					   __func__, ret);
+				ret = -EINVAL;
+				break;
+			}
+			sitar->dai[index].ch_mask |= 1 << ret;
+		}
+		ret = 0;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ret = wait_event_timeout(sitar->dai[index].dai_wait,
+					(sitar->dai[index].ch_mask == 0),
+					msecs_to_jiffies(SLIM_CLOSE_TIMEOUT));
+		if (!ret) {
+			pr_err("%s: slim close tx/rx timeout\n",
+				   __func__);
+			ret = -EINVAL;
+		}
+		ret = 0;
+		break;
+	}
+	return ret;
+}
+
 static int sitar_codec_enable_slimrx(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
 	struct wcd9xxx *sitar;
 	struct snd_soc_codec *codec = w->codec;
 	struct sitar_priv *sitar_p = snd_soc_codec_get_drvdata(codec);
-	u32  j = 0;
+	u32  j = 0, ret = 0;
 	codec->control_data = dev_get_drvdata(codec->dev->parent);
 	sitar = codec->control_data;
 	/* Execute the callback only if interface type is slimbus */
@@ -2856,11 +2895,13 @@
 				break;
 			}
 		}
-		if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot)
-			wcd9xxx_cfg_slim_sch_rx(sitar,
+		if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot) {
+			ret = sitar_codec_enable_chmask(sitar_p, event, j);
+			ret = wcd9xxx_cfg_slim_sch_rx(sitar,
 					sitar_p->dai[j].ch_num,
 					sitar_p->dai[j].ch_tot,
 					sitar_p->dai[j].rate);
+		}
 		break;
 	case SND_SOC_DAPM_POST_PMD:
 		for (j = 0; j < ARRAY_SIZE(sitar_dai); j++) {
@@ -2876,17 +2917,14 @@
 			wcd9xxx_close_slim_sch_rx(sitar,
 					sitar_p->dai[j].ch_num,
 					sitar_p->dai[j].ch_tot);
-			/* Wait for remove channel to complete
-			 * before derouting Rx path
-			 */
-			usleep_range(15000, 15000);
 			sitar_p->dai[j].rate = 0;
 			memset(sitar_p->dai[j].ch_num, 0, (sizeof(u32)*
 					sitar_p->dai[j].ch_tot));
 			sitar_p->dai[j].ch_tot = 0;
+			ret = sitar_codec_enable_chmask(sitar_p, event, j);
 		}
 	}
-	return 0;
+	return ret;
 }
 
 static int sitar_codec_enable_slimtx(struct snd_soc_dapm_widget *w,
@@ -2896,7 +2934,7 @@
 	struct snd_soc_codec *codec = w->codec;
 	struct sitar_priv *sitar_p = snd_soc_codec_get_drvdata(codec);
 	/* index to the DAI ID, for now hardcoding */
-	u32  j = 0;
+	u32  j = 0, ret = 0;
 
 	codec->control_data = dev_get_drvdata(codec->dev->parent);
 	sitar = codec->control_data;
@@ -2915,11 +2953,13 @@
 				break;
 			}
 		}
-		if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot)
-			wcd9xxx_cfg_slim_sch_tx(sitar,
+		if (sitar_p->dai[j].ch_act == sitar_p->dai[j].ch_tot) {
+			ret = sitar_codec_enable_chmask(sitar_p, event, j);
+			ret = wcd9xxx_cfg_slim_sch_tx(sitar,
 					sitar_p->dai[j].ch_num,
 					sitar_p->dai[j].ch_tot,
 					sitar_p->dai[j].rate);
+		}
 		break;
 	case SND_SOC_DAPM_POST_PMD:
 		for (j = 0; j < ARRAY_SIZE(sitar_dai); j++) {
@@ -2939,9 +2979,10 @@
 			memset(sitar_p->dai[j].ch_num, 0, (sizeof(u32)*
 					sitar_p->dai[j].ch_tot));
 			sitar_p->dai[j].ch_tot = 0;
+			ret = sitar_codec_enable_chmask(sitar_p, event, j);
 		}
 	}
-	return 0;
+	return ret;
 }
 
 
@@ -4600,7 +4641,7 @@
 {
 	struct sitar_priv *priv = data;
 	struct snd_soc_codec *codec = priv->codec;
-	int i, j;
+	int i, j, k, port_id, ch_mask_temp;
 	u8 val;
 
 
@@ -4608,14 +4649,30 @@
 		slimbus_value = wcd9xxx_interface_reg_read(codec->control_data,
 			SITAR_SLIM_PGD_PORT_INT_STATUS0 + i);
 		for_each_set_bit(j, &slimbus_value, BITS_PER_BYTE) {
+			port_id = i*8 + j;
 			val = wcd9xxx_interface_reg_read(codec->control_data,
-				SITAR_SLIM_PGD_PORT_INT_SOURCE0 + i*8 + j);
+				SITAR_SLIM_PGD_PORT_INT_SOURCE0 + port_id);
 			if (val & 0x1)
-				pr_err_ratelimited("overflow error on port %x,"
-					" value %x\n", i*8 + j, val);
+				pr_err_ratelimited("overflow error on port %x, value %x\n",
+						port_id, val);
 			if (val & 0x2)
-				pr_err_ratelimited("underflow error on port %x,"
-					" value %x\n", i*8 + j, val);
+				pr_err_ratelimited("underflow error on port %x,value %x\n",
+						port_id, val);
+			if (val & 0x4) {
+				pr_debug("%s: port %x disconnect value %x\n",
+						 __func__, port_id, val);
+				for (k = 0; k < ARRAY_SIZE(sitar_dai); k++) {
+					ch_mask_temp = 1 << port_id;
+					if (ch_mask_temp &
+							priv->dai[k].ch_mask) {
+						priv->dai[k].ch_mask &=
+							~ch_mask_temp;
+					    if (!priv->dai[k].ch_mask)
+							wake_up(
+						&priv->dai[k].dai_wait);
+					}
+				}
+			}
 		}
 		wcd9xxx_interface_reg_write(codec->control_data,
 			SITAR_SLIM_PGD_PORT_INT_CLR0 + i, 0xFF);
@@ -4783,6 +4840,10 @@
 	{SITAR_A_RX_LINE_1_GAIN, 0x10, 0x10},
 	{SITAR_A_RX_LINE_2_GAIN, 0x10, 0x10},
 
+	/* Set the MICBIAS default output as pull down*/
+	{SITAR_A_MICB_1_CTL, 0x01, 0x01},
+	{SITAR_A_MICB_2_CTL, 0x01, 0x01},
+
 	/* Initialize mic biases to differential mode */
 	{SITAR_A_MICB_1_INT_RBIAS, 0x24, 0x24},
 	{SITAR_A_MICB_2_INT_RBIAS, 0x24, 0x24},
@@ -4983,6 +5044,7 @@
 		}
 		sitar->dai[i].ch_num = kzalloc((sizeof(unsigned int)*
 					ch_cnt), GFP_KERNEL);
+		init_waitqueue_head(&sitar->dai[i].dai_wait);
 	}
 
 	codec->ignore_pmdown_time = 1;
diff --git a/sound/soc/codecs/wcd9310.c b/sound/soc/codecs/wcd9310.c
index 68892c1..15a5567 100644
--- a/sound/soc/codecs/wcd9310.c
+++ b/sound/soc/codecs/wcd9310.c
@@ -7590,6 +7590,11 @@
 	{TABLA_A_RX_LINE_3_GAIN, 0x10, 0x10},
 	{TABLA_A_RX_LINE_4_GAIN, 0x10, 0x10},
 
+	/* Set the MICBIAS default output as pull down*/
+	{TABLA_A_MICB_1_CTL, 0x01, 0x01},
+	{TABLA_A_MICB_2_CTL, 0x01, 0x01},
+	{TABLA_A_MICB_3_CTL, 0x01, 0x01},
+
 	/* Initialize mic biases to differential mode */
 	{TABLA_A_MICB_1_INT_RBIAS, 0x24, 0x24},
 	{TABLA_A_MICB_2_INT_RBIAS, 0x24, 0x24},
@@ -7645,11 +7650,16 @@
 };
 
 static const struct tabla_reg_mask_val tabla_1_x_codec_reg_init_val[] = {
+	/* Set the MICBIAS default output as pull down*/
+	{TABLA_1_A_MICB_4_CTL, 0x01, 0x01},
 	/* Initialize mic biases to differential mode */
 	{TABLA_1_A_MICB_4_INT_RBIAS, 0x24, 0x24},
 };
 
 static const struct tabla_reg_mask_val tabla_2_higher_codec_reg_init_val[] = {
+
+	/* Set the MICBIAS default output as pull down*/
+	{TABLA_2_A_MICB_4_CTL, 0x01, 0x01},
 	/* Initialize mic biases to differential mode */
 	{TABLA_2_A_MICB_4_INT_RBIAS, 0x24, 0x24},
 };
diff --git a/sound/soc/codecs/wcd9320.c b/sound/soc/codecs/wcd9320.c
index 67a5d8a..6da9166 100644
--- a/sound/soc/codecs/wcd9320.c
+++ b/sound/soc/codecs/wcd9320.c
@@ -1890,6 +1890,12 @@
 	return 0;
 }
 
+static int taiko_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
+				     struct snd_kcontrol *kcontrol, int event)
+{
+	pr_debug("%s %d %s\n", __func__, event, w->name);
+	return 0;
+}
 
 static int taiko_codec_enable_dmic(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
@@ -2891,6 +2897,13 @@
 	return 0;
 }
 
+static int taiko_spk_dac_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	pr_debug("%s %s %d\n", __func__, w->name, event);
+	return 0;
+}
+
 static const struct snd_soc_dapm_route audio_i2s_map[] = {
 	{"RX_I2S_CLK", NULL, "CDC_CONN"},
 	{"SLIM RX1", NULL, "RX_I2S_CLK"},
@@ -3038,6 +3051,7 @@
 	{"LINEOUT2", NULL, "LINEOUT2 PA"},
 	{"LINEOUT3", NULL, "LINEOUT3 PA"},
 	{"LINEOUT4", NULL, "LINEOUT4 PA"},
+	{"SPK_OUT", NULL, "SPK PA"},
 
 	{"LINEOUT1 PA", NULL, "LINEOUT1_PA_MIXER"},
 	{"LINEOUT1_PA_MIXER", NULL, "LINEOUT1 DAC"},
@@ -3060,6 +3074,9 @@
 	{"RX6 DSM MUX", "CIC_OUT", "RX6 MIX1"},
 	{"LINEOUT4 DAC", NULL, "RX6 DSM MUX"},
 
+	{"SPK PA", NULL, "SPK DAC"},
+	{"SPK DAC", NULL, "RX7 MIX1"},
+
 	{"RX1 CHAIN", NULL, "RX1 MIX2"},
 	{"RX2 CHAIN", NULL, "RX2 MIX2"},
 	{"RX1 CHAIN", NULL, "ANC"},
@@ -3070,6 +3087,7 @@
 	{"LINEOUT2 DAC", NULL, "RX_BIAS"},
 	{"LINEOUT3 DAC", NULL, "RX_BIAS"},
 	{"LINEOUT4 DAC", NULL, "RX_BIAS"},
+	{"SPK DAC", NULL, "RX_BIAS"},
 
 	{"RX1 MIX1", NULL, "COMP1_CLK"},
 	{"RX2 MIX1", NULL, "COMP1_CLK"},
@@ -4192,6 +4210,7 @@
 	SND_SOC_DAPM_OUTPUT("LINEOUT2"),
 	SND_SOC_DAPM_OUTPUT("LINEOUT3"),
 	SND_SOC_DAPM_OUTPUT("LINEOUT4"),
+	SND_SOC_DAPM_OUTPUT("SPK_OUT"),
 
 	SND_SOC_DAPM_PGA_E("LINEOUT1 PA", TAIKO_A_RX_LINE_CNP_EN, 0, 0, NULL,
 			0, taiko_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
@@ -4205,6 +4224,9 @@
 	SND_SOC_DAPM_PGA_E("LINEOUT4 PA", TAIKO_A_RX_LINE_CNP_EN, 3, 0, NULL,
 			0, taiko_codec_enable_lineout, SND_SOC_DAPM_PRE_PMU |
 			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_PGA_E("SPK PA", TAIKO_A_SPKR_DRV_EN, 7, 0 , NULL,
+			   0, taiko_codec_enable_spk_pa, SND_SOC_DAPM_PRE_PMU |
+			   SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 	SND_SOC_DAPM_DAC_E("LINEOUT1 DAC", NULL, TAIKO_A_RX_LINE_1_DAC_CTL, 7, 0
 		, taiko_lineout_dac_event,
@@ -4223,6 +4245,10 @@
 	SND_SOC_DAPM_SWITCH("LINEOUT4 DAC GROUND", SND_SOC_NOPM, 0, 0,
 				&lineout4_ground_switch),
 
+	SND_SOC_DAPM_DAC_E("SPK DAC", NULL, SND_SOC_NOPM, 0, 0,
+			   taiko_spk_dac_event,
+			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
 	SND_SOC_DAPM_MIXER_E("RX1 MIX2", TAIKO_A_CDC_CLK_RX_B1_CTL, 0, 0, NULL,
 		0, taiko_codec_reset_interpolator, SND_SOC_DAPM_PRE_PMU |
 		SND_SOC_DAPM_POST_PMU),
@@ -6709,11 +6735,16 @@
 	struct taiko_priv *taiko;
 	int rc = 0;
 
-	if (!codec || !cfg->calibration) {
-		pr_err("Error: no codec or calibration\n");
+	if (!codec) {
+		pr_err("%s: no codec\n", __func__);
 		return -EINVAL;
 	}
 
+	if (!cfg->calibration) {
+		pr_warn("%s: mbhc is not configured\n", __func__);
+		return 0;
+	}
+
 	if (cfg->mclk_rate != TAIKO_MCLK_RATE_12288KHZ) {
 		if (cfg->mclk_rate == TAIKO_MCLK_RATE_9600KHZ)
 			pr_err("Error: clock rate %dHz is not yet supported\n",
@@ -7158,6 +7189,102 @@
 };
 #endif
 
+static int taiko_setup_irqs(struct taiko_priv *taiko)
+{
+	int ret;
+	int i;
+	struct snd_soc_codec *codec = taiko->codec;
+
+	ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION,
+				  taiko_hs_insert_irq, "Headset insert detect",
+				  taiko);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+			TAIKO_IRQ_MBHC_INSERTION);
+		goto err_insert_irq;
+	}
+	wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION);
+
+	ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_REMOVAL,
+				  taiko_hs_remove_irq, "Headset remove detect",
+				  taiko);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+			TAIKO_IRQ_MBHC_REMOVAL);
+		goto err_remove_irq;
+	}
+
+	ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_POTENTIAL,
+				  taiko_dce_handler, "DC Estimation detect",
+				  taiko);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+			TAIKO_IRQ_MBHC_POTENTIAL);
+		goto err_potential_irq;
+	}
+
+	ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_RELEASE,
+				 taiko_release_handler, "Button Release detect",
+				 taiko);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+			TAIKO_IRQ_MBHC_RELEASE);
+		goto err_release_irq;
+	}
+
+	ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_SLIMBUS,
+				  taiko_slimbus_irq, "SLIMBUS Slave", taiko);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+			TAIKO_IRQ_SLIMBUS);
+		goto err_slimbus_irq;
+	}
+
+	for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++)
+		wcd9xxx_interface_reg_write(codec->control_data,
+					   TAIKO_SLIM_PGD_PORT_INT_EN0 + i,
+					   0xFF);
+
+	ret = wcd9xxx_request_irq(codec->control_data,
+				  TAIKO_IRQ_HPH_PA_OCPL_FAULT,
+				  taiko_hphl_ocp_irq,
+				  "HPH_L OCP detect", taiko);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+			TAIKO_IRQ_HPH_PA_OCPL_FAULT);
+		goto err_hphl_ocp_irq;
+	}
+	wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_HPH_PA_OCPL_FAULT);
+
+	ret = wcd9xxx_request_irq(codec->control_data,
+				  TAIKO_IRQ_HPH_PA_OCPR_FAULT,
+				  taiko_hphr_ocp_irq,
+				  "HPH_R OCP detect", taiko);
+	if (ret) {
+		pr_err("%s: Failed to request irq %d\n", __func__,
+		       TAIKO_IRQ_HPH_PA_OCPR_FAULT);
+		goto err_hphr_ocp_irq;
+	}
+	wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_HPH_PA_OCPR_FAULT);
+
+	return ret;
+
+err_hphr_ocp_irq:
+	wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_HPH_PA_OCPL_FAULT,
+			 taiko);
+err_hphl_ocp_irq:
+	wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_SLIMBUS, taiko);
+err_slimbus_irq:
+	wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_RELEASE, taiko);
+err_release_irq:
+	wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_POTENTIAL, taiko);
+err_potential_irq:
+	wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_REMOVAL, taiko);
+err_remove_irq:
+	wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION, taiko);
+err_insert_irq:
+
+	return ret;
+}
+
 static int taiko_codec_probe(struct snd_soc_codec *codec)
 {
 	struct wcd9xxx *control;
@@ -7167,6 +7294,7 @@
 	int i;
 	int ch_cnt;
 
+
 	codec->control_data = dev_get_drvdata(codec->dev->parent);
 	control = codec->control_data;
 
@@ -7234,70 +7362,8 @@
 
 	snd_soc_dapm_sync(dapm);
 
-	ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION,
-		taiko_hs_insert_irq, "Headset insert detect", taiko);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-			TAIKO_IRQ_MBHC_INSERTION);
-		goto err_insert_irq;
-	}
-	wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION);
+	(void) taiko_setup_irqs(taiko);
 
-	ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_REMOVAL,
-		taiko_hs_remove_irq, "Headset remove detect", taiko);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-			TAIKO_IRQ_MBHC_REMOVAL);
-		goto err_remove_irq;
-	}
-
-	ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_POTENTIAL,
-		taiko_dce_handler, "DC Estimation detect", taiko);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-			TAIKO_IRQ_MBHC_POTENTIAL);
-		goto err_potential_irq;
-	}
-
-	ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_MBHC_RELEASE,
-		taiko_release_handler, "Button Release detect", taiko);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-			TAIKO_IRQ_MBHC_RELEASE);
-		goto err_release_irq;
-	}
-
-	ret = wcd9xxx_request_irq(codec->control_data, TAIKO_IRQ_SLIMBUS,
-		taiko_slimbus_irq, "SLIMBUS Slave", taiko);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-			TAIKO_IRQ_SLIMBUS);
-		goto err_slimbus_irq;
-	}
-
-	for (i = 0; i < WCD9XXX_SLIM_NUM_PORT_REG; i++)
-		wcd9xxx_interface_reg_write(codec->control_data,
-			TAIKO_SLIM_PGD_PORT_INT_EN0 + i, 0xFF);
-
-	ret = wcd9xxx_request_irq(codec->control_data,
-		TAIKO_IRQ_HPH_PA_OCPL_FAULT, taiko_hphl_ocp_irq,
-		"HPH_L OCP detect", taiko);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-			TAIKO_IRQ_HPH_PA_OCPL_FAULT);
-		goto err_hphl_ocp_irq;
-	}
-	wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_HPH_PA_OCPL_FAULT);
-
-	ret = wcd9xxx_request_irq(codec->control_data,
-		TAIKO_IRQ_HPH_PA_OCPR_FAULT, taiko_hphr_ocp_irq,
-		"HPH_R OCP detect", taiko);
-	if (ret) {
-		pr_err("%s: Failed to request irq %d\n", __func__,
-			TAIKO_IRQ_HPH_PA_OCPR_FAULT);
-		goto err_hphr_ocp_irq;
-	}
-	wcd9xxx_disable_irq(codec->control_data, TAIKO_IRQ_HPH_PA_OCPR_FAULT);
 	for (i = 0; i < ARRAY_SIZE(taiko_dai); i++) {
 		switch (taiko_dai[i].id) {
 		case AIF1_PB:
@@ -7338,20 +7404,6 @@
 	codec->ignore_pmdown_time = 1;
 	return ret;
 
-err_hphr_ocp_irq:
-	wcd9xxx_free_irq(codec->control_data,
-			TAIKO_IRQ_HPH_PA_OCPL_FAULT, taiko);
-err_hphl_ocp_irq:
-	wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_SLIMBUS, taiko);
-err_slimbus_irq:
-	wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_RELEASE, taiko);
-err_release_irq:
-	wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_POTENTIAL, taiko);
-err_potential_irq:
-	wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_REMOVAL, taiko);
-err_remove_irq:
-	wcd9xxx_free_irq(codec->control_data, TAIKO_IRQ_MBHC_INSERTION, taiko);
-err_insert_irq:
 err_pdata:
 	mutex_destroy(&taiko->codec_resource_lock);
 	kfree(taiko);
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 9b60a56..894e114 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -153,13 +153,25 @@
 	help
 	 To add support for SoC audio on MSM8960 and APQ8064 boards
 
+config AUDIO_OCMEM
+	bool "Enable OCMEM for audio/voice usecase"
+	depends on MSM_OCMEM
+	default n
+	help
+	 To add support for use of on-chip memory (OCMEM)
+	 for audio use cases on MSM8974.
+	 OCMEM is exercised for low-power
+	 audio and voice use cases.
+
 config SND_SOC_MSM8974
 	tristate "SoC Machine driver for MSM8974 boards"
 	depends on ARCH_MSM8974
 	select SND_SOC_QDSP6V2
 	select SND_SOC_MSM_STUB
 	select SND_SOC_MSM_HOSTLESS_PCM
+	select SND_SOC_WCD9320
 	select SND_DYNAMIC_MINORS
+	select AUDIO_OCMEM
 	help
 	 To add support for SoC audio on MSM8974.
 	 This will enable sound soc drivers which
diff --git a/sound/soc/msm/apq8064.c b/sound/soc/msm/apq8064.c
index d8a4624..a596f03 100644
--- a/sound/soc/msm/apq8064.c
+++ b/sound/soc/msm/apq8064.c
@@ -718,10 +718,10 @@
 				struct snd_ctl_elem_value *ucontrol)
 {
 	switch (ucontrol->value.integer.value[0]) {
-	case 0:
+	case 8000:
 		msm_btsco_rate = BTSCO_RATE_8KHZ;
 		break;
-	case 1:
+	case 16000:
 		msm_btsco_rate = BTSCO_RATE_16KHZ;
 		break;
 	default:
@@ -1152,8 +1152,9 @@
 	snd_soc_dapm_sync(dapm);
 
 	err = snd_soc_jack_new(codec, "Headset Jack",
-		(SND_JACK_HEADSET | SND_JACK_OC_HPHL | SND_JACK_OC_HPHR),
-		&hs_jack);
+			       (SND_JACK_HEADSET | SND_JACK_OC_HPHL |
+				SND_JACK_OC_HPHR | SND_JACK_UNSUPPORTED),
+			       &hs_jack);
 	if (err) {
 		pr_err("failed to create new jack\n");
 		return err;
diff --git a/sound/soc/msm/mdm9615.c b/sound/soc/msm/mdm9615.c
index dbe5d00..b80a0a9 100644
--- a/sound/soc/msm/mdm9615.c
+++ b/sound/soc/msm/mdm9615.c
@@ -733,10 +733,10 @@
 				struct snd_ctl_elem_value *ucontrol)
 {
 	switch (ucontrol->value.integer.value[0]) {
-	case 0:
+	case 8000:
 		mdm9615_btsco_rate = SAMPLE_RATE_8KHZ;
 		break;
-	case 1:
+	case 16000:
 		mdm9615_btsco_rate = SAMPLE_RATE_16KHZ;
 		break;
 	default:
diff --git a/sound/soc/msm/mpq8064.c b/sound/soc/msm/mpq8064.c
index 6685ce5..f5bbf56 100644
--- a/sound/soc/msm/mpq8064.c
+++ b/sound/soc/msm/mpq8064.c
@@ -1165,8 +1165,6 @@
 		.platform_name  = "msm-pcm-afe",
 		.ignore_suspend = 1,
 		.ignore_pmdown_time = 1, /* this dainlink has playback support */
-		.codec_dai_name = "snd-soc-dummy-dai",
-		.codec_name = "snd-soc-dummy",
 	},
 	{
 		.name = "MSM AFE-PCM TX",
@@ -1176,8 +1174,6 @@
 		.codec_dai_name = "msm-stub-tx",
 		.platform_name  = "msm-pcm-afe",
 		.ignore_suspend = 1,
-		.codec_dai_name = "snd-soc-dummy-dai",
-		.codec_name = "snd-soc-dummy",
 	},
 	{
 		.name = "MSM8960 Compr1",
diff --git a/sound/soc/msm/msm-dai-q6.c b/sound/soc/msm/msm-dai-q6.c
index fb74c0a..89b709f 100644
--- a/sound/soc/msm/msm-dai-q6.c
+++ b/sound/soc/msm/msm-dai-q6.c
@@ -1005,6 +1005,7 @@
 		case VOICE_RECORD_TX:
 		case VOICE_RECORD_RX:
 			rc = afe_start_pseudo_port(dai->id);
+			break;
 		default:
 			rc = afe_port_start(dai->id, &dai_data->port_config,
 					    dai_data->rate);
diff --git a/sound/soc/msm/msm-pcm-routing.c b/sound/soc/msm/msm-pcm-routing.c
index c0c679d..da3d335 100644
--- a/sound/soc/msm/msm-pcm-routing.c
+++ b/sound/soc/msm/msm-pcm-routing.c
@@ -2499,6 +2499,9 @@
 	{"SEC_AUX_PCM_TX", NULL, "BE_IN"},
 	{"BE_OUT", NULL, "AUX_PCM_RX"},
 	{"AUX_PCM_TX", NULL, "BE_IN"},
+	{"INCALL_RECORD_TX", NULL, "BE_IN"},
+	{"INCALL_RECORD_RX", NULL, "BE_IN"},
+	{"BE_OUT", NULL, "VOICE_PLAYBACK_TX"},
 };
 
 static int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,
diff --git a/sound/soc/msm/msm8930.c b/sound/soc/msm/msm8930.c
index 374e875..e86db10 100644
--- a/sound/soc/msm/msm8930.c
+++ b/sound/soc/msm/msm8930.c
@@ -422,11 +422,12 @@
 static int msm8930_btsco_rate_put(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
+
 	switch (ucontrol->value.integer.value[0]) {
-	case 0:
+	case 8000:
 		msm8930_btsco_rate = BTSCO_RATE_8KHZ;
 		break;
-	case 1:
+	case 16000:
 		msm8930_btsco_rate = BTSCO_RATE_16KHZ;
 		break;
 	default:
diff --git a/sound/soc/msm/msm8960.c b/sound/soc/msm/msm8960.c
index 5f8a63e..b10a7ea 100644
--- a/sound/soc/msm/msm8960.c
+++ b/sound/soc/msm/msm8960.c
@@ -609,10 +609,10 @@
 				struct snd_ctl_elem_value *ucontrol)
 {
 	switch (ucontrol->value.integer.value[0]) {
-	case 0:
+	case 8000:
 		msm8960_btsco_rate = SAMPLE_RATE_8KHZ;
 		break;
-	case 1:
+	case 16000:
 		msm8960_btsco_rate = SAMPLE_RATE_16KHZ;
 		break;
 	default:
diff --git a/sound/soc/msm/msm8974.c b/sound/soc/msm/msm8974.c
index 9c57161..3516022 100644
--- a/sound/soc/msm/msm8974.c
+++ b/sound/soc/msm/msm8974.c
@@ -26,7 +26,7 @@
 #include <asm/mach-types.h>
 #include <mach/socinfo.h>
 #include <qdsp6v2/msm-pcm-routing-v2.h>
-#include "../codecs/wcd9310.h"
+#include "../codecs/wcd9320.h"
 
 /* 8974 machine driver */
 
@@ -81,21 +81,7 @@
 static struct snd_soc_jack hs_jack;
 static struct snd_soc_jack button_jack;
 
-static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
-				    bool dapm);
-
-static struct tabla_mbhc_config mbhc_cfg = {
-	.headset_jack = &hs_jack,
-	.button_jack = &button_jack,
-	.read_fw_bin = false,
-	.calibration = NULL,
-	.micbias = TABLA_MICBIAS2,
-	.mclk_cb_fn = msm_enable_codec_ext_clk,
-	.mclk_rate = TABLA_EXT_CLK_RATE,
-	.gpio = 0, /* MBHC GPIO is not configured */
-	.gpio_irq = 0,
-	.gpio_level_insert = 1,
-};
+static struct mutex cdc_mclk_mutex;
 
 static void msm_enable_ext_spk_amp_gpio(u32 spk_amp_gpio)
 {
@@ -244,6 +230,8 @@
 {
 	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	mutex_lock(&dapm->codec->mutex);
+
 	pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
 	if (msm_spk_control == MSM8974_SPK_ON) {
 		snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos");
@@ -258,17 +246,19 @@
 	}
 
 	snd_soc_dapm_sync(dapm);
+	mutex_unlock(&dapm->codec->mutex);
 }
 
 static int msm_get_spk(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
+		       struct snd_ctl_elem_value *ucontrol)
 {
 	pr_debug("%s: msm_spk_control = %d", __func__, msm_spk_control);
 	ucontrol->value.integer.value[0] = msm_spk_control;
 	return 0;
 }
+
 static int msm_set_spk(struct snd_kcontrol *kcontrol,
-	struct snd_ctl_elem_value *ucontrol)
+		       struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
 
@@ -280,8 +270,9 @@
 	msm_ext_control(codec);
 	return 1;
 }
+
 static int msm_spkramp_event(struct snd_soc_dapm_widget *w,
-	struct snd_kcontrol *k, int event)
+			     struct snd_kcontrol *k, int event)
 {
 	pr_debug("%s() %x\n", __func__, SND_SOC_DAPM_EVENT_ON(event));
 
@@ -318,12 +309,6 @@
 	return 0;
 }
 
-static int msm_enable_codec_ext_clk(struct snd_soc_codec *codec, int enable,
-				    bool dapm)
-{
-	return 0;
-}
-
 static int msm_mclk_event(struct snd_soc_dapm_widget *w,
 		struct snd_kcontrol *kcontrol, int event)
 {
@@ -453,14 +438,14 @@
 
 static const char *const btsco_rate_text[] = {"8000", "16000"};
 static const struct soc_enum msm_btsco_enum[] = {
-		SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
+	SOC_ENUM_SINGLE_EXT(2, btsco_rate_text),
 };
 
 static int msm_slim_0_rx_ch_get(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
 	pr_debug("%s: msm_slim_0_rx_ch  = %d\n", __func__,
-			msm_slim_0_rx_ch);
+		 msm_slim_0_rx_ch);
 	ucontrol->value.integer.value[0] = msm_slim_0_rx_ch - 1;
 	return 0;
 }
@@ -471,7 +456,7 @@
 	msm_slim_0_rx_ch = ucontrol->value.integer.value[0] + 1;
 
 	pr_debug("%s: msm_slim_0_rx_ch = %d\n", __func__,
-			msm_slim_0_rx_ch);
+		 msm_slim_0_rx_ch);
 	return 1;
 }
 
@@ -479,7 +464,7 @@
 	struct snd_ctl_elem_value *ucontrol)
 {
 	pr_debug("%s: msm_slim_0_tx_ch  = %d\n", __func__,
-			msm_slim_0_tx_ch);
+		 msm_slim_0_tx_ch);
 	ucontrol->value.integer.value[0] = msm_slim_0_tx_ch - 1;
 	return 0;
 }
@@ -489,16 +474,14 @@
 {
 	msm_slim_0_tx_ch = ucontrol->value.integer.value[0] + 1;
 
-	pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__,
-			msm_slim_0_tx_ch);
+	pr_debug("%s: msm_slim_0_tx_ch = %d\n", __func__, msm_slim_0_tx_ch);
 	return 1;
 }
 
 static int msm_btsco_rate_get(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_value *ucontrol)
 {
-	pr_debug("%s: msm_btsco_rate  = %d", __func__,
-					msm_btsco_rate);
+	pr_debug("%s: msm_btsco_rate  = %d", __func__, msm_btsco_rate);
 	ucontrol->value.integer.value[0] = msm_btsco_rate;
 	return 0;
 }
@@ -517,33 +500,23 @@
 		msm_btsco_rate = BTSCO_RATE_8KHZ;
 		break;
 	}
-	pr_debug("%s: msm_btsco_rate = %d\n", __func__,
-					msm_btsco_rate);
+	pr_debug("%s: msm_btsco_rate = %d\n", __func__, msm_btsco_rate);
 	return 0;
 }
 
-static const struct snd_kcontrol_new tabla_msm_controls[] = {
-	SOC_ENUM_EXT("Speaker Function", msm_enum[0], msm_get_spk,
-		msm_set_spk),
-	SOC_ENUM_EXT("SLIM_0_RX Channels", msm_enum[1],
-		msm_slim_0_rx_ch_get, msm_slim_0_rx_ch_put),
-	SOC_ENUM_EXT("SLIM_0_TX Channels", msm_enum[2],
-		msm_slim_0_tx_ch_get, msm_slim_0_tx_ch_put),
-};
-
 static const struct snd_kcontrol_new int_btsco_rate_mixer_controls[] = {
 	SOC_ENUM_EXT("Internal BTSCO SampleRate", msm_btsco_enum[0],
-		msm_btsco_rate_get, msm_btsco_rate_put),
+		     msm_btsco_rate_get, msm_btsco_rate_put),
 };
 
 static int msm_auxpcm_be_params_fixup(struct snd_soc_pcm_runtime *rtd,
 					struct snd_pcm_hw_params *params)
 {
-	struct snd_interval *rate = hw_param_interval(params,
-					SNDRV_PCM_HW_PARAM_RATE);
+	struct snd_interval *rate =
+	    hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
 
-	struct snd_interval *channels = hw_param_interval(params,
-					SNDRV_PCM_HW_PARAM_CHANNELS);
+	struct snd_interval *channels =
+	    hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
 
 	/* PCM only supports mono output with 8khz sample rate */
 	rate->min = rate->max = 8000;
@@ -629,6 +602,193 @@
 	.startup = msm_auxpcm_startup,
 	.shutdown = msm_auxpcm_shutdown,
 };
+
+static int msm_slim_0_rx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+					    struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+	SNDRV_PCM_HW_PARAM_RATE);
+
+	struct snd_interval *channels =
+	    hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	pr_debug("%s()\n", __func__);
+	rate->min = rate->max = 48000;
+	channels->min = channels->max = msm_slim_0_rx_ch;
+
+	return 0;
+}
+
+static int msm_slim_0_tx_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+					    struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate = hw_param_interval(params,
+	SNDRV_PCM_HW_PARAM_RATE);
+
+	struct snd_interval *channels = hw_param_interval(params,
+			SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	pr_debug("%s()\n", __func__);
+	rate->min = rate->max = 48000;
+	channels->min = channels->max = msm_slim_0_tx_ch;
+
+	return 0;
+}
+
+static const struct soc_enum msm_snd_enum[] = {
+	SOC_ENUM_SINGLE_EXT(2, spk_function),
+	SOC_ENUM_SINGLE_EXT(2, slim0_rx_ch_text),
+	SOC_ENUM_SINGLE_EXT(4, slim0_tx_ch_text),
+};
+
+static const struct snd_kcontrol_new msm_snd_controls[] = {
+	SOC_ENUM_EXT("Speaker Function", msm_snd_enum[0], msm_get_spk,
+		     msm_set_spk),
+	SOC_ENUM_EXT("SLIM_0_RX Channels", msm_snd_enum[1],
+		     msm_slim_0_rx_ch_get, msm_slim_0_rx_ch_put),
+	SOC_ENUM_EXT("SLIM_0_TX Channels", msm_snd_enum[2],
+		     msm_slim_0_tx_ch_get, msm_slim_0_tx_ch_put),
+};
+
+static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
+{
+	int err;
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+
+	pr_info("%s(), dev_name: %s\n", __func__, dev_name(cpu_dai->dev));
+
+	if (machine_is_msm8960_liquid()) {
+		top_spk_pamp_gpio = (PM8921_GPIO_PM_TO_SYS(19));
+		bottom_spk_pamp_gpio = (PM8921_GPIO_PM_TO_SYS(18));
+	}
+
+	rtd->pmdown_time = 0;
+
+	err = snd_soc_add_codec_controls(codec, msm_snd_controls,
+					 ARRAY_SIZE(msm_snd_controls));
+	if (err < 0)
+		return err;
+
+	snd_soc_dapm_new_controls(dapm, msm_dapm_widgets,
+				ARRAY_SIZE(msm_dapm_widgets));
+
+	snd_soc_dapm_add_routes(dapm, common_audio_map,
+		ARRAY_SIZE(common_audio_map));
+
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Pos");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Bottom Neg");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Pos");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk Top Neg");
+
+	snd_soc_dapm_sync(dapm);
+
+	err = snd_soc_jack_new(codec, "Headset Jack",
+			       (SND_JACK_HEADSET | SND_JACK_OC_HPHL |
+				SND_JACK_OC_HPHR | SND_JACK_UNSUPPORTED),
+			       &hs_jack);
+	if (err) {
+		pr_err("failed to create new jack\n");
+		return err;
+	}
+
+	err = snd_soc_jack_new(codec, "Button Jack",
+			       TAIKO_JACK_BUTTON_MASK, &button_jack);
+	if (err) {
+		pr_err("failed to create new jack\n");
+		return err;
+	}
+
+	return err;
+}
+
+static int msm_snd_startup(struct snd_pcm_substream *substream)
+{
+	pr_debug("%s(): substream = %s  stream = %d\n", __func__,
+		 substream->name, substream->stream);
+	return 0;
+}
+
+static int msm_snd_hw_params(struct snd_pcm_substream *substream,
+			     struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret = 0;
+	unsigned int rx_ch[SLIM_MAX_RX_PORTS], tx_ch[SLIM_MAX_TX_PORTS];
+	unsigned int rx_ch_cnt = 0, tx_ch_cnt = 0;
+	unsigned int user_set_tx_ch = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		pr_debug("%s: rx_0_ch=%d\n", __func__, msm_slim_0_rx_ch);
+		ret = snd_soc_dai_get_channel_map(codec_dai,
+					&tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
+		if (ret < 0) {
+			pr_err("%s: failed to get codec chan map\n", __func__);
+			goto end;
+		}
+
+		ret = snd_soc_dai_set_channel_map(cpu_dai, 0, 0,
+						  msm_slim_0_rx_ch, rx_ch);
+		if (ret < 0) {
+			pr_err("%s: failed to set cpu chan map\n", __func__);
+			goto end;
+		}
+		ret = snd_soc_dai_set_channel_map(codec_dai, 0, 0,
+						  msm_slim_0_rx_ch, rx_ch);
+		if (ret < 0) {
+			pr_err("%s: failed to set codec channel map\n",
+			       __func__);
+			goto end;
+		}
+	} else {
+
+		if (codec_dai->id == 2)
+			user_set_tx_ch = msm_slim_0_tx_ch;
+		else if (codec_dai->id == 4)
+			user_set_tx_ch = params_channels(params);
+
+		pr_debug("%s: %s_tx_dai_id_%d_ch=%d\n", __func__,
+			 codec_dai->name, codec_dai->id, user_set_tx_ch);
+
+		ret = snd_soc_dai_get_channel_map(codec_dai,
+					 &tx_ch_cnt, tx_ch, &rx_ch_cnt , rx_ch);
+		if (ret < 0) {
+			pr_err("%s: failed to get codec chan map\n", __func__);
+			goto end;
+		}
+		ret = snd_soc_dai_set_channel_map(cpu_dai,
+						  user_set_tx_ch, tx_ch, 0 , 0);
+		if (ret < 0) {
+			pr_err("%s: failed to set cpu chan map\n", __func__);
+			goto end;
+		}
+		ret = snd_soc_dai_set_channel_map(codec_dai,
+						  user_set_tx_ch, tx_ch, 0, 0);
+		if (ret < 0) {
+			pr_err("%s: failed to set codec channel map\n",
+			       __func__);
+			goto end;
+		}
+	}
+end:
+	return ret;
+}
+
+static void msm_snd_shutdown(struct snd_pcm_substream *substream)
+{
+	pr_debug("%s(): substream = %s stream = %d\n", __func__,
+		 substream->name, substream->stream);
+}
+
+static struct snd_soc_ops msm8974_be_ops = {
+	.startup = msm_snd_startup,
+	.hw_params = msm_snd_hw_params,
+	.shutdown = msm_snd_shutdown,
+};
+
 /* Digital audio interface glue - connects codec <---> CPU */
 static struct snd_soc_dai_link msm_dai[] = {
 	/* FrontEnd DAI Links */
@@ -734,7 +894,33 @@
 		.be_id = MSM_BACKEND_DAI_AUXPCM_TX,
 		.be_hw_params_fixup = msm_auxpcm_be_params_fixup,
 	},
-
+	/* Backend DAI Links */
+	{
+		.name = LPASS_BE_SLIMBUS_0_RX,
+		.stream_name = "Slimbus Playback",
+		.cpu_dai_name = "msm-dai-q6-dev.16384",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "taiko_codec",
+		.codec_dai_name	= "taiko_rx1",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_0_RX,
+		.init = &msm_audrx_init,
+		.be_hw_params_fixup = msm_slim_0_rx_be_hw_params_fixup,
+		.ops = &msm8974_be_ops,
+		.ignore_pmdown_time = 1, /* dai link has playback support */
+	},
+	{
+		.name = LPASS_BE_SLIMBUS_0_TX,
+		.stream_name = "Slimbus Capture",
+		.cpu_dai_name = "msm-dai-q6-dev.16385",
+		.platform_name = "msm-pcm-routing",
+		.codec_name = "taiko_codec",
+		.codec_dai_name	= "taiko_tx1",
+		.no_pcm = 1,
+		.be_id = MSM_BACKEND_DAI_SLIMBUS_0_TX,
+		.be_hw_params_fixup = msm_slim_0_tx_be_hw_params_fixup,
+		.ops = &msm8974_be_ops,
+	},
 };
 
 struct snd_soc_card snd_soc_card_msm = {
@@ -756,6 +942,8 @@
 static int __init msm_audio_init(void)
 {
 	int ret = 0;
+
+	mutex_init(&cdc_mclk_mutex);
 	if (!machine_is_msm8974_sim()) {
 		pr_err("%s: Not the right machine type\n", __func__);
 		return -ENODEV;
@@ -763,7 +951,6 @@
 	msm_snd_device = platform_device_alloc("soc-audio", 0);
 	if (!msm_snd_device) {
 		pr_err("Platform device allocation failed\n");
-		kfree(mbhc_cfg.calibration);
 		return -ENOMEM;
 	}
 
@@ -771,7 +958,6 @@
 	ret = platform_device_add(msm_snd_device);
 	if (ret) {
 		platform_device_put(msm_snd_device);
-		kfree(mbhc_cfg.calibration);
 		return ret;
 	}
 	return ret;
@@ -787,7 +973,6 @@
 	}
 	msm_free_headset_mic_gpios();
 	platform_device_unregister(msm_snd_device);
-	kfree(mbhc_cfg.calibration);
 }
 module_exit(msm_audio_exit);
 
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index ff2cc8d..acb073d 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -1,4 +1,6 @@
 snd-soc-qdsp6v2-objs += msm-dai-q6-v2.o msm-pcm-q6-v2.o msm-pcm-routing-v2.o msm-compr-q6-v2.o  msm-multi-ch-pcm-q6-v2.o
 snd-soc-qdsp6v2-objs += msm-pcm-lpa-v2.o msm-pcm-afe-v2.o msm-pcm-voip-v2.o msm-pcm-voice-v2.o
 obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o
-obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o q6voice.o
+obj-y += q6adm.o q6afe.o q6asm.o q6audio-v2.o q6voice.o q6core.o
+ocmem-audio-objs += audio_ocmem.o
+obj-$(CONFIG_AUDIO_OCMEM) += ocmem-audio.o
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.c b/sound/soc/msm/qdsp6v2/audio_ocmem.c
new file mode 100644
index 0000000..86a82e2
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.c
@@ -0,0 +1,672 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <asm/mach-types.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/ocmem.h>
+#include "q6core.h"
+#include "audio_ocmem.h"
+
+#define AUDIO_OCMEM_BUF_SIZE (512 * SZ_1K)
+
+enum {
+	OCMEM_STATE_ALLOC = 1,
+	OCMEM_STATE_MAP_TRANSITION,
+	OCMEM_STATE_MAP_COMPL,
+	OCMEM_STATE_UNMAP_TRANSITION,
+	OCMEM_STATE_UNMAP_COMPL,
+	OCMEM_STATE_SHRINK,
+	OCMEM_STATE_GROW,
+	OCMEM_STATE_FREE,
+	OCMEM_STATE_MAP_FAIL,
+	OCMEM_STATE_UNMAP_FAIL,
+	OCMEM_STATE_EXIT,
+};
+static void audio_ocmem_process_workdata(struct work_struct *work);
+
+struct audio_ocmem_workdata {
+	int	id;
+	bool    en;
+	struct work_struct work;
+};
+
+struct voice_ocmem_workdata {
+	int id;
+	bool en;
+	struct work_struct work;
+};
+
+struct audio_ocmem_prv {
+	atomic_t audio_state;
+	struct ocmem_notifier *audio_hdl;
+	struct ocmem_buf *buf;
+	uint32_t audio_ocmem_bus_client;
+	struct ocmem_map_list mlist;
+	struct avcs_cmd_rsp_get_low_power_segments_info_t *lp_memseg_ptr;
+	wait_queue_head_t audio_wait;
+	atomic_t  audio_cond;
+	atomic_t  audio_exit;
+	spinlock_t audio_lock;
+	struct workqueue_struct *audio_ocmem_workqueue;
+	struct workqueue_struct *voice_ocmem_workqueue;
+};
+
+static struct audio_ocmem_prv audio_ocmem_lcl;
+
+
+static int audio_ocmem_client_cb(struct notifier_block *this,
+		 unsigned long event1, void *data)
+{
+	int rc = NOTIFY_DONE;
+	unsigned long flags;
+
+	pr_debug("%s: event[%ld] cur state[%x]\n", __func__,
+			event1, atomic_read(&audio_ocmem_lcl.audio_state));
+
+	spin_lock_irqsave(&audio_ocmem_lcl.audio_lock, flags);
+	switch (event1) {
+	case OCMEM_MAP_DONE:
+		pr_debug("%s: map done\n", __func__);
+		atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_COMPL);
+		break;
+	case OCMEM_MAP_FAIL:
+		pr_debug("%s: map fail\n", __func__);
+		atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_FAIL);
+		break;
+	case OCMEM_UNMAP_DONE:
+		pr_debug("%s: unmap done\n", __func__);
+		atomic_set(&audio_ocmem_lcl.audio_state,
+				OCMEM_STATE_UNMAP_COMPL);
+		break;
+	case OCMEM_UNMAP_FAIL:
+		pr_debug("%s: unmap fail\n", __func__);
+		atomic_set(&audio_ocmem_lcl.audio_state,
+				OCMEM_STATE_UNMAP_FAIL);
+		break;
+	case OCMEM_ALLOC_GROW:
+		audio_ocmem_lcl.buf = data;
+		atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_GROW);
+		break;
+	case OCMEM_ALLOC_SHRINK:
+		atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_SHRINK);
+		break;
+	default:
+		pr_err("%s: Invalid event[%ld]\n", __func__, event1);
+		break;
+	}
+	spin_unlock_irqrestore(&audio_ocmem_lcl.audio_lock, flags);
+	if (atomic_read(&audio_ocmem_lcl.audio_cond)) {
+		atomic_set(&audio_ocmem_lcl.audio_cond, 0);
+		wake_up(&audio_ocmem_lcl.audio_wait);
+	}
+	return rc;
+}
+
+/**
+ * audio_ocmem_enable() - Exercise OCMEM for audio
+ * @cid:	client id - OCMEM_LP_AUDIO
+ *
+ * OCMEM is allocated for the audio use case and the low power
+ * segments obtained from the DSP are mapped from main memory
+ * into OCMEM. Shrink and grow requests are then received and
+ * processed based on the current audio state.
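+ *
+ * Minimal usage sketch (normally reached via the audio OCMEM work
+ * handler rather than called directly):
+ *
+ *	if (audio_ocmem_enable(OCMEM_LP_AUDIO))
+ *		pr_err("failed to move audio segments to OCMEM\n");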
+ */
+int audio_ocmem_enable(int cid)
+{
+	int ret;
+	int i, j;
+	struct ocmem_buf *buf = NULL;
+	struct avcs_cmd_rsp_get_low_power_segments_info_t *lp_segptr;
+
+	pr_debug("%s\n", __func__);
+	/* Non-blocking ocmem allocate (asynchronous) */
+	buf = ocmem_allocate_nb(cid, AUDIO_OCMEM_BUF_SIZE);
+	if (IS_ERR_OR_NULL(buf)) {
+		pr_err("%s: failed: %d\n", __func__, cid);
+		return -ENOMEM;
+	}
+	atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_ALLOC);
+
+	audio_ocmem_lcl.buf = buf;
+	atomic_set(&audio_ocmem_lcl.audio_exit, 0);
+	if (!buf->len) {
+		wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+			(atomic_read(&audio_ocmem_lcl.audio_cond) == 0)	||
+			(atomic_read(&audio_ocmem_lcl.audio_exit) == 1));
+
+		if (atomic_read(&audio_ocmem_lcl.audio_exit)) {
+			pr_err("%s: audio playback ended while waiting for ocmem\n",
+					__func__);
+			ret = -EINVAL;
+			goto fail_cmd;
+		}
+	}
+	if (audio_ocmem_lcl.lp_memseg_ptr == NULL) {
+		/* Retrieve low power segments */
+		ret = core_get_low_power_segments(
+					&audio_ocmem_lcl.lp_memseg_ptr);
+		if (ret != 0) {
+			pr_err("%s: get low power segments from DSP failed, rc=%d\n",
+					__func__, ret);
+			goto fail_cmd;
+		}
+	}
+	lp_segptr = audio_ocmem_lcl.lp_memseg_ptr;
+	audio_ocmem_lcl.mlist.num_chunks = lp_segptr->num_segments;
+	for (i = 0, j = 0; j < audio_ocmem_lcl.mlist.num_chunks; j++, i++) {
+		audio_ocmem_lcl.mlist.chunks[j].ro =
+			(lp_segptr->mem_segment[i].type == READ_ONLY_SEGMENT);
+		audio_ocmem_lcl.mlist.chunks[j].ddr_paddr =
+			lp_segptr->mem_segment[i].start_address_lsw;
+		audio_ocmem_lcl.mlist.chunks[j].size =
+			lp_segptr->mem_segment[i].size;
+		pr_debug("%s: ro:%d, ddr_paddr[%x], size[%x]\n", __func__,
+			audio_ocmem_lcl.mlist.chunks[j].ro,
+			(uint32_t)audio_ocmem_lcl.mlist.chunks[j].ddr_paddr,
+			(uint32_t)audio_ocmem_lcl.mlist.chunks[j].size);
+	}
+
+	/* vote for ocmem bus bandwidth */
+	ret = msm_bus_scale_client_update_request(
+				audio_ocmem_lcl.audio_ocmem_bus_client,
+				0);
+	if (ret)
+		pr_err("%s: failed to vote for bus bandwidth\n", __func__);
+
+	atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_MAP_TRANSITION);
+
+	ret = ocmem_map(cid, audio_ocmem_lcl.buf, &audio_ocmem_lcl.mlist);
+	if (ret) {
+		pr_err("%s: ocmem_map failed\n", __func__);
+		goto fail_cmd;
+	}
+
+
+	while ((atomic_read(&audio_ocmem_lcl.audio_state) !=
+						OCMEM_STATE_EXIT)) {
+
+		wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+				atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
+
+		switch (atomic_read(&audio_ocmem_lcl.audio_state)) {
+		case OCMEM_STATE_MAP_COMPL:
+			pr_debug("%s: audio_cond[0x%x], audio_state[0x%x]\n",
+			__func__, atomic_read(&audio_ocmem_lcl.audio_cond),
+			atomic_read(&audio_ocmem_lcl.audio_state));
+			atomic_set(&audio_ocmem_lcl.audio_state,
+					OCMEM_STATE_MAP_COMPL);
+			atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+			break;
+		case OCMEM_STATE_SHRINK:
+			atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+			ret = ocmem_unmap(cid, audio_ocmem_lcl.buf,
+					&audio_ocmem_lcl.mlist);
+			if (ret) {
+				pr_err("%s: ocmem_unmap failed, state[%d]\n",
+				__func__,
+				atomic_read(&audio_ocmem_lcl.audio_state));
+				goto fail_cmd;
+			}
+
+			atomic_set(&audio_ocmem_lcl.audio_state,
+					OCMEM_STATE_UNMAP_TRANSITION);
+			wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+				atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
+			atomic_set(&audio_ocmem_lcl.audio_state,
+					OCMEM_STATE_UNMAP_COMPL);
+			ret = ocmem_shrink(cid, audio_ocmem_lcl.buf, 0);
+			if (ret) {
+				pr_err("%s: ocmem_shrink failed, state[%d]\n",
+				__func__,
+				atomic_read(&audio_ocmem_lcl.audio_state));
+				goto fail_cmd;
+			}
+
+			break;
+		case OCMEM_STATE_GROW:
+			atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+			ret = ocmem_map(cid, audio_ocmem_lcl.buf,
+						&audio_ocmem_lcl.mlist);
+			if (ret) {
+				pr_err("%s: ocmem_map failed, state[%d]\n",
+				__func__,
+				atomic_read(&audio_ocmem_lcl.audio_state));
+				goto fail_cmd;
+			}
+			atomic_set(&audio_ocmem_lcl.audio_state,
+				OCMEM_STATE_MAP_TRANSITION);
+			wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+				atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
+			atomic_set(&audio_ocmem_lcl.audio_state,
+				OCMEM_STATE_MAP_COMPL);
+			break;
+		}
+	}
+fail_cmd:
+	pr_debug("%s: exit\n", __func__);
+	return ret;
+}
+
+/**
+ * audio_ocmem_disable() - Disable OCMEM for audio
+ * @cid:	client id - OCMEM_LP_AUDIO
+ *
+ * OCMEM is released for the audio use case. Depending on the
+ * current audio state, the OCMEM buffer is unmapped and then
+ * freed so audio no longer uses the OCMEM segments.
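+ *
+ * Sketch (runs from the audio OCMEM work handler once the low-power
+ * audio use case ends):
+ *
+ *	if (audio_ocmem_disable(OCMEM_LP_AUDIO))
+ *		pr_err("failed to release OCMEM for audio\n");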
+ */
+int audio_ocmem_disable(int cid)
+{
+	int ret;
+
+	if (atomic_read(&audio_ocmem_lcl.audio_cond))
+		atomic_set(&audio_ocmem_lcl.audio_cond, 0);
+	pr_debug("%s: audio_cond[0x%x], audio_state[0x%x]\n", __func__,
+			 atomic_read(&audio_ocmem_lcl.audio_cond),
+			 atomic_read(&audio_ocmem_lcl.audio_state));
+	switch (atomic_read(&audio_ocmem_lcl.audio_state)) {
+	case OCMEM_STATE_MAP_COMPL:
+		atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+		ret = ocmem_unmap(cid, audio_ocmem_lcl.buf,
+					&audio_ocmem_lcl.mlist);
+		if (ret) {
+			pr_err("%s: ocmem_unmap failed, state[%d]\n",
+				__func__,
+				atomic_read(&audio_ocmem_lcl.audio_state));
+			goto fail_cmd;
+		}
+
+		atomic_set(&audio_ocmem_lcl.audio_state, OCMEM_STATE_EXIT);
+
+		wait_event_interruptible(audio_ocmem_lcl.audio_wait,
+				atomic_read(&audio_ocmem_lcl.audio_cond) == 0);
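+		/* fall through: unmap completed, free the OCMEM buffer */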
+	case OCMEM_STATE_UNMAP_COMPL:
+		ret = ocmem_free(OCMEM_LP_AUDIO, audio_ocmem_lcl.buf);
+		if (ret) {
+			pr_err("%s: ocmem_free failed, state[%d]\n",
+				__func__,
+				atomic_read(&audio_ocmem_lcl.audio_state));
+			goto fail_cmd;
+		}
+		pr_debug("%s: ocmem_free success\n", __func__);
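+		/* fall through */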
+	default:
+		pr_debug("%s: state=%d", __func__,
+			atomic_read(&audio_ocmem_lcl.audio_state));
+		break;
+
+	}
+	return 0;
+fail_cmd:
+	return ret;
+}
+
+static void voice_ocmem_process_workdata(struct work_struct *work)
+{
+	int cid;
+	bool en;
+	int rc = 0;
+
+	struct voice_ocmem_workdata *voice_ocm_work =
+		container_of(work, struct voice_ocmem_workdata, work);
+
+	en = voice_ocm_work->en;
+	switch (voice_ocm_work->id) {
+	case VOICE:
+		cid = OCMEM_VOICE;
+		if (en)
+			disable_ocmem_for_voice(cid);
+		else
+			enable_ocmem_after_voice(cid);
+		break;
+	default:
+		pr_err("%s: Invalid client id[%d]\n", __func__,
+					voice_ocm_work->id);
+		rc = -EINVAL;
+	}
+
+	kfree(voice_ocm_work);
+}
+/**
+ * voice_ocmem_process_req() - disable/enable OCMEM during voice call
+ * @cid:	client id - VOICE
+ * @enable:	1 - enable
+ *		0 - disable
+ *
+ * This configures OCMEM at the start of a voice call. If any
+ * audio clients are already using OCMEM, they are evicted
+ * from OCMEM for the duration of the call and restored once
+ * the call ends.
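+ *
+ * Sketch (hypothetical caller in the voice driver): pass true when
+ * the call starts and false once it ends:
+ *
+ *	voice_ocmem_process_req(VOICE, true);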
+ */
+int voice_ocmem_process_req(int cid, bool enable)
+{
+
+	struct voice_ocmem_workdata *workdata = NULL;
+
+	if (audio_ocmem_lcl.voice_ocmem_workqueue == NULL) {
+		pr_err("%s: voice ocmem workqueue is NULL\n", __func__);
+		return -EINVAL;
+	}
+	workdata = kzalloc(sizeof(struct voice_ocmem_workdata),
+						GFP_ATOMIC);
+	if (workdata == NULL) {
+		pr_err("%s: mem failure\n", __func__);
+		return -ENOMEM;
+	}
+	workdata->id = cid;
+	workdata->en = enable;
+
+	INIT_WORK(&workdata->work, voice_ocmem_process_workdata);
+	queue_work(audio_ocmem_lcl.voice_ocmem_workqueue, &workdata->work);
+
+	return 0;
+}
+
+/**
+ * disable_ocmem_for_voice() - disable OCMEM during voice call
+ * @cid:        client id - OCMEM_VOICE
+ *
+ * This configures OCMEM at the start of a voice call. If any
+ * audio clients are already using OCMEM, they are evicted
+ * from OCMEM.
+ */
+int disable_ocmem_for_voice(int cid)
+{
+	int ret;
+
+	ret = ocmem_evict(cid);
+	if (ret)
+		pr_err("%s: ocmem_evict is not successful\n", __func__);
+	return ret;
+}
+
+/**
+ * enable_ocmem_after_voice() - re-enable OCMEM after a voice call
+ * @cid:	client id - OCMEM_VOICE
+ *
+ * OCMEM use is re-enabled once the voice call ends. Any client
+ * that was evicted from OCMEM is restored and remapped into
+ * OCMEM after the voice call.
+ */
+int enable_ocmem_after_voice(int cid)
+{
+	int ret;
+
+	ret = ocmem_restore(cid);
+	if (ret)
+		pr_err("%s: ocmem_restore is not successful\n", __func__);
+	return ret;
+}
+
+
+static void audio_ocmem_process_workdata(struct work_struct *work)
+{
+	int cid;
+	bool en;
+	int rc = 0;
+
+	struct audio_ocmem_workdata *audio_ocm_work =
+			container_of(work, struct audio_ocmem_workdata, work);
+
+	en = audio_ocm_work->en;
+	switch (audio_ocm_work->id) {
+	case AUDIO:
+		cid = OCMEM_LP_AUDIO;
+		if (en)
+			audio_ocmem_enable(cid);
+		else
+			audio_ocmem_disable(cid);
+		break;
+	default:
+		pr_err("%s: Invalid client id[%d]\n", __func__,
+					audio_ocm_work->id);
+		rc = -EINVAL;
+	}
+
+	kfree(audio_ocm_work);
+}
+
+/**
+ * audio_ocmem_process_req() - process audio request to use OCMEM
+ * @id:		client id - AUDIO
+ * @enable:	enable or disable OCMEM
+ *
+ * Queues work on the audio OCMEM workqueue to enable or disable
+ * OCMEM use for audio clients.
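+ *
+ * Sketch: a low-power audio stream would typically call
+ *
+ *	audio_ocmem_process_req(AUDIO, true);
+ *
+ * when it opens and pass false again when it closes.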
+ */
+int audio_ocmem_process_req(int id, bool enable)
+{
+	struct audio_ocmem_workdata *workdata = NULL;
+
+	if (audio_ocmem_lcl.audio_ocmem_workqueue == NULL) {
+		pr_err("%s: audio ocmem workqueue is NULL\n", __func__);
+		return -EINVAL;
+	}
+	workdata = kzalloc(sizeof(struct audio_ocmem_workdata),
+							GFP_ATOMIC);
+	if (workdata == NULL) {
+		pr_err("%s: mem failure\n", __func__);
+		return -ENOMEM;
+	}
+	workdata->id = id;
+	workdata->en = enable;
+
+	/* if previous work waiting for ocmem - signal it to exit */
+	atomic_set(&audio_ocmem_lcl.audio_exit, 1);
+
+	INIT_WORK(&workdata->work, audio_ocmem_process_workdata);
+	queue_work(audio_ocmem_lcl.audio_ocmem_workqueue, &workdata->work);
+
+	return 0;
+}
+
+
+static struct notifier_block audio_ocmem_client_nb = {
+	.notifier_call = audio_ocmem_client_cb,
+};
+
+static int audio_ocmem_platform_data_populate(struct platform_device *pdev)
+{
+	int ret;
+	struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
+	struct msm_bus_vectors *audio_ocmem_bus_vectors = NULL;
+	struct msm_bus_paths *ocmem_audio_bus_paths = NULL;
+	u32 val;
+
+	if (!pdev->dev.of_node) {
+		pr_err("%s: device tree information missing\n", __func__);
+		return -ENODEV;
+	}
+
+	audio_ocmem_bus_vectors = kzalloc(sizeof(struct msm_bus_vectors),
+								GFP_KERNEL);
+	if (!audio_ocmem_bus_vectors) {
+		dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+		return -ENOMEM;
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,msm-ocmem-audio-src-id", &val);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-src-id missing in DT node\n",
+				__func__);
+		goto fail1;
+	}
+	audio_ocmem_bus_vectors->src = val;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,msm-ocmem-audio-dst-id", &val);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-dst-id missing in DT node\n",
+				__func__);
+		goto fail1;
+	}
+	audio_ocmem_bus_vectors->dst = val;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,msm-ocmem-audio-ab", &val);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-ab missing in DT node\n",
+					__func__);
+		goto fail1;
+	}
+	audio_ocmem_bus_vectors->ab = val;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,msm-ocmem-audio-ib", &val);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: qcom,msm-ocmem-audio-ib missing in DT node\n",
+					__func__);
+		goto fail1;
+	}
+	audio_ocmem_bus_vectors->ib = val;
+
+	ocmem_audio_bus_paths = kzalloc(sizeof(struct msm_bus_paths),
+								GFP_KERNEL);
+	if (!ocmem_audio_bus_paths) {
+		dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+		ret = -ENOMEM;
+		goto fail1;
+	}
+	ocmem_audio_bus_paths->num_paths = 1;
+	ocmem_audio_bus_paths->vectors = audio_ocmem_bus_vectors;
+
+	audio_ocmem_bus_scale_pdata =
+		kzalloc(sizeof(struct msm_bus_scale_pdata), GFP_KERNEL);
+
+	if (!audio_ocmem_bus_scale_pdata) {
+		dev_err(&pdev->dev, "Failed to allocate memory for platform data\n");
+		ret = -ENOMEM;
+		goto fail2;
+	}
+
+	audio_ocmem_bus_scale_pdata->usecase = ocmem_audio_bus_paths;
+	audio_ocmem_bus_scale_pdata->num_usecases = 1;
+	audio_ocmem_bus_scale_pdata->name = "audio-ocmem";
+
+	dev_set_drvdata(&pdev->dev, audio_ocmem_bus_scale_pdata);
+	return ret;
+
+fail2:
+	kfree(ocmem_audio_bus_paths);
+fail1:
+	kfree(audio_ocmem_bus_vectors);
+	return ret;
+}
+static int ocmem_audio_client_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
+
+	pr_debug("%s\n", __func__);
+	audio_ocmem_lcl.audio_ocmem_workqueue =
+		alloc_workqueue("ocmem_audio_client_driver_audio",
+					WQ_NON_REENTRANT, 0);
+	if (!audio_ocmem_lcl.audio_ocmem_workqueue) {
+		pr_err("%s: Failed to create ocmem audio work queue\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	audio_ocmem_lcl.voice_ocmem_workqueue =
+		alloc_workqueue("ocmem_audio_client_driver_voice",
+					WQ_NON_REENTRANT, 0);
+	if (!audio_ocmem_lcl.voice_ocmem_workqueue) {
+		pr_err("%s: Failed to create ocmem voice work queue\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	init_waitqueue_head(&audio_ocmem_lcl.audio_wait);
+	atomic_set(&audio_ocmem_lcl.audio_cond, 1);
+	atomic_set(&audio_ocmem_lcl.audio_exit, 0);
+	spin_lock_init(&audio_ocmem_lcl.audio_lock);
+
+	/* populate platform data */
+	ret = audio_ocmem_platform_data_populate(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to populate platform data, rc = %d\n",
+						__func__, ret);
+		return -ENODEV;
+	}
+	audio_ocmem_bus_scale_pdata = dev_get_drvdata(&pdev->dev);
+
+	audio_ocmem_lcl.audio_ocmem_bus_client =
+		msm_bus_scale_register_client(audio_ocmem_bus_scale_pdata);
+
+	if (!audio_ocmem_lcl.audio_ocmem_bus_client) {
+		pr_err("%s: msm_bus_scale_register_client() failed\n",
+		__func__);
+		return -EFAULT;
+	}
+	audio_ocmem_lcl.audio_hdl = ocmem_notifier_register(OCMEM_LP_AUDIO,
+						&audio_ocmem_client_nb);
+	if (audio_ocmem_lcl.audio_hdl == NULL) {
+		pr_err("%s: Failed to get ocmem handle %d\n", __func__,
+						OCMEM_LP_AUDIO);
+	}
+	audio_ocmem_lcl.lp_memseg_ptr = NULL;
+	return 0;
+}
+
+static int ocmem_audio_client_remove(struct platform_device *pdev)
+{
+	struct msm_bus_scale_pdata *audio_ocmem_bus_scale_pdata = NULL;
+
+	audio_ocmem_bus_scale_pdata = (struct msm_bus_scale_pdata *)
+					dev_get_drvdata(&pdev->dev);
+
+	kfree(audio_ocmem_bus_scale_pdata->usecase->vectors);
+	kfree(audio_ocmem_bus_scale_pdata->usecase);
+	kfree(audio_ocmem_bus_scale_pdata);
+	ocmem_notifier_unregister(audio_ocmem_lcl.audio_hdl,
+					&audio_ocmem_client_nb);
+	return 0;
+}
+static const struct of_device_id msm_ocmem_audio_dt_match[] = {
+	{.compatible = "qcom,msm-ocmem-audio"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, msm_ocmem_audio_dt_match);
+
+static struct platform_driver audio_ocmem_driver = {
+	.driver = {
+		.name = "audio-ocmem",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_ocmem_audio_dt_match,
+	},
+	.probe = ocmem_audio_client_probe,
+	.remove = ocmem_audio_client_remove,
+};
+
+
+static int __init ocmem_audio_client_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&audio_ocmem_driver);
+
+	if (rc)
+		pr_err("%s: Failed to register audio ocmem driver\n", __func__);
+	return rc;
+}
+module_init(ocmem_audio_client_init);
+
+static void __exit ocmem_audio_client_exit(void)
+{
+	platform_driver_unregister(&audio_ocmem_driver);
+}
+
+module_exit(ocmem_audio_client_exit);
diff --git a/sound/soc/msm/qdsp6v2/audio_ocmem.h b/sound/soc/msm/qdsp6v2/audio_ocmem.h
new file mode 100644
index 0000000..e915516
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/audio_ocmem.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _AUDIO_OCMEM_H_
+#define _AUDIO_OCMEM_H_
+
+#include <linux/module.h>
+#include <linux/notifier.h>
+
+#include <mach/ocmem.h>
+
+#define AUDIO 0
+#define VOICE 1
+
+#ifdef CONFIG_AUDIO_OCMEM
+int audio_ocmem_process_req(int id, bool enable);
+int voice_ocmem_process_req(int cid, bool enable);
+int enable_ocmem_after_voice(int cid);
+int disable_ocmem_for_voice(int cid);
+#else
+static inline int audio_ocmem_process_req(int id, bool enable)
+						{ return 0; }
+static inline int voice_ocmem_process_req(int cid, bool enable)
+						{ return 0; }
+static inline int enable_ocmem_after_voice(int cid) { return 0; }
+static inline int disable_ocmem_for_voice(int cid) { return 0; }
+#endif
+
+#endif
diff --git a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
index 783a03d..99fd1d3 100644
--- a/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-dai-q6-v2.c
@@ -325,6 +325,494 @@
 	.remove = msm_dai_q6_dai_auxpcm_remove,
 };
 
+static int msm_dai_q6_prepare(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		/* PORT START should be set if prepare called in active state */
+		rc = afe_q6_interface_prepare();
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to open AFE APR\n");
+	}
+	return rc;
+}
+
+static int msm_dai_q6_trigger(struct snd_pcm_substream *substream, int cmd,
+		struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	/*
+	 * Start/stop the port without waiting for the Q6 AFE response. The
+	 * native Q6 AFE driver would need to propagate the AFE response in
+	 * order to handle port start/stop command errors properly if an
+	 * error does arise.
+	 */
+	pr_debug("%s:port:%d  cmd:%d dai_data->status_mask = %ld\n",
+		__func__, dai->id, cmd, *dai_data->status_mask);
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+			switch (dai->id) {
+			case VOICE_PLAYBACK_TX:
+			case VOICE_RECORD_TX:
+			case VOICE_RECORD_RX:
+				afe_pseudo_port_start_nowait(dai->id);
+				break;
+			default:
+				afe_port_start_nowait(dai->id,
+					&dai_data->port_config, dai_data->rate);
+				break;
+			}
+			set_bit(STATUS_PORT_STARTED,
+				dai_data->status_mask);
+		}
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+			switch (dai->id) {
+			case VOICE_PLAYBACK_TX:
+			case VOICE_RECORD_TX:
+			case VOICE_RECORD_RX:
+				afe_pseudo_port_stop_nowait(dai->id);
+				break;
+			default:
+				afe_port_stop_nowait(dai->id);
+				break;
+			}
+			clear_bit(STATUS_PORT_STARTED,
+				dai_data->status_mask);
+		}
+		break;
+
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int msm_dai_q6_cdc_hw_params(struct snd_pcm_hw_params *params,
+				    struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	switch (dai_data->channels) {
+	case 2:
+		dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
+		break;
+	case 1:
+		dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	dai_data->rate = params_rate(params);
+	dai_data->port_config.i2s.sample_rate = dai_data->rate;
+	dai_data->port_config.i2s.i2s_cfg_minor_version =
+						AFE_API_VERSION_I2S_CONFIG;
+	dai_data->port_config.i2s.data_format =  AFE_LINEAR_PCM_DATA;
+	dev_dbg(dai->dev, " channel %d sample rate %d entered\n",
+	dai_data->channels, dai_data->rate);
+
+	/* Q6 only supports 16 bit for now */
+	dai_data->port_config.i2s.bit_width = 16;
+	dai_data->port_config.i2s.channel_mode = 1;
+	return 0;
+}
+
+static u8 num_of_bits_set(u8 sd_line_mask)
+{
+	u8 num_bits_set = 0;
+
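+	/* clear the lowest set bit on each pass (Kernighan's method) */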
+	while (sd_line_mask) {
+		num_bits_set++;
+		sd_line_mask = sd_line_mask & (sd_line_mask - 1);
+	}
+	return num_bits_set;
+}
+
+static int msm_dai_q6_i2s_hw_params(struct snd_pcm_hw_params *params,
+				    struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	struct msm_i2s_data *i2s_pdata =
+			(struct msm_i2s_data *) dai->dev->platform_data;
+
+	dai_data->channels = params_channels(params);
+	if (num_of_bits_set(i2s_pdata->sd_lines) == 1) {
+		switch (dai_data->channels) {
+		case 2:
+			dai_data->port_config.i2s.mono_stereo = MSM_AFE_STEREO;
+			break;
+		case 1:
+			dai_data->port_config.i2s.mono_stereo = MSM_AFE_MONO;
+			break;
+		default:
+			pr_warn("greater than stereo has not been validated\n");
+			break;
+		}
+	}
+	dai_data->rate = params_rate(params);
+	dai_data->port_config.i2s.sample_rate = dai_data->rate;
+	dai_data->port_config.i2s.i2s_cfg_minor_version =
+						AFE_API_VERSION_I2S_CONFIG;
+	dai_data->port_config.i2s.data_format =  AFE_LINEAR_PCM_DATA;
+	/* Q6 only supports 16 bit for now */
+	dai_data->port_config.i2s.bit_width = 16;
+	dai_data->port_config.i2s.channel_mode = 1;
+
+	return 0;
+}
+
+static int msm_dai_q6_slim_bus_hw_params(struct snd_pcm_hw_params *params,
+				    struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	dai_data->rate = params_rate(params);
+
+	/* Q6 only supports 16 bit for now */
+	dai_data->port_config.slim_sch.sb_cfg_minor_version =
+				AFE_API_VERSION_SLIMBUS_CONFIG;
+	dai_data->port_config.slim_sch.bit_width = 16;
+	dai_data->port_config.slim_sch.data_format = 0;
+	dai_data->port_config.slim_sch.num_channels = dai_data->channels;
+	dai_data->port_config.slim_sch.sample_rate = dai_data->rate;
+
+	dev_dbg(dai->dev, "%s:slimbus_dev_id[%hu] bit_wd[%hu] format[%hu]\n"
+		"num_channel %hu  shared_ch_mapping[0]  %hu\n"
+		"slave_port_mapping[1]  %hu slave_port_mapping[2]  %hu\n"
+		"sample_rate %d\n", __func__,
+		dai_data->port_config.slim_sch.slimbus_dev_id,
+		dai_data->port_config.slim_sch.bit_width,
+		dai_data->port_config.slim_sch.data_format,
+		dai_data->port_config.slim_sch.num_channels,
+		dai_data->port_config.slim_sch.shared_ch_mapping[0],
+		dai_data->port_config.slim_sch.shared_ch_mapping[1],
+		dai_data->port_config.slim_sch.shared_ch_mapping[2],
+		dai_data->rate);
+
+	return 0;
+}
+
+static int msm_dai_q6_bt_fm_hw_params(struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai, int stream)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->channels = params_channels(params);
+	dai_data->rate = params_rate(params);
+
+	dev_dbg(dai->dev, "channels %d sample rate %d entered\n",
+		dai_data->channels, dai_data->rate);
+
+	memset(&dai_data->port_config, 0, sizeof(dai_data->port_config));
+
+	return 0;
+}
+
+static int msm_dai_q6_afe_rtproxy_hw_params(struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	dai_data->rate = params_rate(params);
+	dai_data->port_config.rtproxy.num_channels = params_channels(params);
+	dai_data->port_config.rtproxy.sample_rate = params_rate(params);
+
+	pr_debug("channel %d entered,dai_id: %d,rate: %d\n",
+	dai_data->port_config.rtproxy.num_channels, dai->id, dai_data->rate);
+
+	dai_data->port_config.rtproxy.rt_proxy_cfg_minor_version =
+				AFE_API_VERSION_RT_PROXY_CONFIG;
+	dai_data->port_config.rtproxy.bit_width = 16; /* Q6 only supports 16 */
+	dai_data->port_config.rtproxy.interleaved = 1;
+	dai_data->port_config.rtproxy.frame_size = params_period_bytes(params);
+	dai_data->port_config.rtproxy.jitter_allowance =
+				dai_data->port_config.rtproxy.frame_size/2;
+	dai_data->port_config.rtproxy.low_water_mark = 0;
+	dai_data->port_config.rtproxy.high_water_mark = 0;
+
+	return 0;
+}
+
+/* The current implementation assumes hw_params is called only once.
+ * This may not always be the case; it is unclear what should happen
+ * when the ADM and AFE ports are already opened and the parameters
+ * change.
+ */
+static int msm_dai_q6_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params,
+				struct snd_soc_dai *dai)
+{
+	int rc = 0;
+
+	switch (dai->id) {
+	case PRIMARY_I2S_TX:
+	case PRIMARY_I2S_RX:
+	case SECONDARY_I2S_RX:
+		rc = msm_dai_q6_cdc_hw_params(params, dai, substream->stream);
+		break;
+	case MI2S_RX:
+		rc = msm_dai_q6_i2s_hw_params(params, dai, substream->stream);
+		break;
+	case SLIMBUS_0_RX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_TX:
+		rc = msm_dai_q6_slim_bus_hw_params(params, dai,
+				substream->stream);
+		break;
+	case INT_BT_SCO_RX:
+	case INT_BT_SCO_TX:
+	case INT_FM_RX:
+	case INT_FM_TX:
+		rc = msm_dai_q6_bt_fm_hw_params(params, dai, substream->stream);
+		break;
+	case RT_PROXY_DAI_001_TX:
+	case RT_PROXY_DAI_001_RX:
+	case RT_PROXY_DAI_002_TX:
+	case RT_PROXY_DAI_002_RX:
+		rc = msm_dai_q6_afe_rtproxy_hw_params(params, dai);
+		break;
+	case VOICE_PLAYBACK_TX:
+	case VOICE_RECORD_RX:
+	case VOICE_RECORD_TX:
+		rc = 0;
+		break;
+	default:
+		dev_err(dai->dev, "invalid AFE port ID\n");
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static void msm_dai_q6_shutdown(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		switch (dai->id) {
+		case VOICE_PLAYBACK_TX:
+		case VOICE_RECORD_TX:
+		case VOICE_RECORD_RX:
+			pr_debug("%s, stop pseudo port:%d\n",
+						__func__,  dai->id);
+			rc = afe_stop_pseudo_port(dai->id);
+			break;
+		default:
+			rc = afe_close(dai->id); /* can block */
+			break;
+		}
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AFE port\n");
+		pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
+			*dai_data->status_mask);
+		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	}
+}
+
+static int msm_dai_q6_cdc_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		dai_data->port_config.i2s.ws_src = 1; /* CPU is master */
+		break;
+	case SND_SOC_DAIFMT_CBM_CFM:
+		dai_data->port_config.i2s.ws_src = 0; /* CPU is slave */
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int msm_dai_q6_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	int rc = 0;
+
+	dev_dbg(dai->dev, "enter %s, id = %d fmt[%d]\n", __func__,
+							dai->id, fmt);
+	switch (dai->id) {
+	case PRIMARY_I2S_TX:
+	case PRIMARY_I2S_RX:
+	case MI2S_RX:
+	case SECONDARY_I2S_RX:
+		rc = msm_dai_q6_cdc_set_fmt(dai, fmt);
+		break;
+	default:
+		dev_err(dai->dev, "invalid cpu_dai set_fmt\n");
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_dai_q6_set_channel_map(struct snd_soc_dai *dai,
+				unsigned int tx_num, unsigned int *tx_slot,
+				unsigned int rx_num, unsigned int *rx_slot)
+
+{
+	int rc = 0;
+	struct msm_dai_q6_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	unsigned int i = 0;
+
+	dev_dbg(dai->dev, "enter %s, id = %d\n", __func__, dai->id);
+	switch (dai->id) {
+	case SLIMBUS_0_RX:
+	case SLIMBUS_1_RX:
+		/*
+		 * Channel numbers must be between 128 and 255.
+		 * For RX port use channel numbers
+		 * from 138 to 144 for pre-Taiko
+		 * from 144 to 159 for Taiko
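+		 * e.g. a stereo Taiko RX stream might use a mapping such
+		 * as {144, 145} (illustrative values within that range).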
+		 */
+		if (!rx_slot)
+			return -EINVAL;
+		for (i = 0; i < rx_num; i++) {
+			dai_data->port_config.slim_sch.shared_ch_mapping[i] =
+			    rx_slot[i];
+			pr_debug("%s: find number of channels[%d] ch[%d]\n",
+			       __func__, i, rx_slot[i]);
+		}
+		dai_data->port_config.slim_sch.num_channels = rx_num;
+		pr_debug("%s:SLIMBUS_0_RX cnt[%d] ch[%d %d]\n", __func__,
+		rx_num, dai_data->port_config.slim_sch.shared_ch_mapping[0],
+		dai_data->port_config.slim_sch.shared_ch_mapping[1]);
+
+		break;
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_TX:
+		/*
+		 * Channel numbers must be between 128 and 255.
+		 * For TX port use channel numbers
+		 * from 128 to 137 for pre-Taiko
+		 * from 128 to 143 for Taiko
+		 */
+		if (!tx_slot)
+			return -EINVAL;
+		for (i = 0; i < tx_num; i++) {
+			dai_data->port_config.slim_sch.shared_ch_mapping[i] =
+			    tx_slot[i];
+			pr_debug("%s: find number of channels[%d] ch[%d]\n",
+				 __func__, i, tx_slot[i]);
+		}
+		dai_data->port_config.slim_sch.num_channels = tx_num;
+		pr_debug("%s:SLIMBUS_0_TX cnt[%d] ch[%d %d]\n", __func__,
+			 tx_num,
+			 dai_data->port_config.slim_sch.shared_ch_mapping[0],
+		dai_data->port_config.slim_sch.shared_ch_mapping[1]);
+		break;
+	default:
+		dev_err(dai->dev, "invalid cpu_dai id %d\n", dai->id);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static struct snd_soc_dai_ops msm_dai_q6_ops = {
+	.prepare	= msm_dai_q6_prepare,
+	.trigger	= msm_dai_q6_trigger,
+	.hw_params	= msm_dai_q6_hw_params,
+	.shutdown	= msm_dai_q6_shutdown,
+	.set_fmt	= msm_dai_q6_set_fmt,
+	.set_channel_map = msm_dai_q6_set_channel_map,
+};
+
+static int msm_dai_q6_dai_probe(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data;
+	int rc = 0;
+
+	dai_data = kzalloc(sizeof(struct msm_dai_q6_dai_data), GFP_KERNEL);
+
+	if (!dai_data) {
+		dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
+		dai->id);
+		rc = -ENOMEM;
+	} else
+		dev_set_drvdata(dai->dev, dai_data);
+
+	return rc;
+}
+
+static int msm_dai_q6_dai_remove(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_dai_data *dai_data;
+	int rc;
+
+	dai_data = dev_get_drvdata(dai->dev);
+
+	/* If AFE port is still up, close it */
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		switch (dai->id) {
+		case VOICE_PLAYBACK_TX:
+		case VOICE_RECORD_TX:
+		case VOICE_RECORD_RX:
+			pr_debug("%s, stop pseudo port:%d\n",
+				 __func__,  dai->id);
+			rc = afe_stop_pseudo_port(dai->id);
+			break;
+		default:
+			rc = afe_close(dai->id); /* can block */
+		}
+		if (IS_ERR_VALUE(rc))
+			dev_err(dai->dev, "fail to close AFE port\n");
+		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	}
+	kfree(dai_data);
+	snd_soc_unregister_dai(dai->dev);
+
+	return 0;
+}
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_rx_dai = {
+	.playback = {
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 1,
+		.rate_min = 8000,
+		.rate_max = 16000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_1_tx_dai = {
+	.capture = {
+		.rates = SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 1,
+		.rate_min = 8000,
+		.rate_max = 16000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
 static int msm_auxpcm_dev_probe(struct platform_device *pdev)
 {
 	int id;
@@ -512,6 +1000,135 @@
 	},
 };
 
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_rx_dai = {
+	.playback = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min = 8000,
+		.rate_max = 48000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static struct snd_soc_dai_driver msm_dai_q6_slimbus_tx_dai = {
+	.capture = {
+		.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
+		SNDRV_PCM_RATE_16000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min = 8000,
+		.rate_max = 48000,
+	},
+	.ops = &msm_dai_q6_ops,
+	.probe = msm_dai_q6_dai_probe,
+	.remove = msm_dai_q6_dai_remove,
+};
+
+static int msm_dai_q6_dev_probe(struct platform_device *pdev)
+{
+	int rc, id;
+	const char *q6_dev_id = "qcom,msm-dai-q6-dev-id";
+
+	rc = of_property_read_u32(pdev->dev.of_node, q6_dev_id, &id);
+	if (rc) {
+		dev_err(&pdev->dev,
+			"%s: missing %s in dt node\n", __func__, q6_dev_id);
+		return rc;
+	}
+
+	pdev->id = id;
+	dev_set_name(&pdev->dev, "%s.%d", "msm-dai-q6-dev", id);
+
+	pr_debug("%s: dev name %s, id:%d\n", __func__,
+		 dev_name(&pdev->dev), pdev->id);
+
+	switch (id) {
+	case SLIMBUS_0_RX:
+		rc = snd_soc_register_dai(&pdev->dev,
+					  &msm_dai_q6_slimbus_rx_dai);
+		break;
+	case SLIMBUS_0_TX:
+		rc = snd_soc_register_dai(&pdev->dev,
+					  &msm_dai_q6_slimbus_tx_dai);
+		break;
+	case SLIMBUS_1_RX:
+		rc = snd_soc_register_dai(&pdev->dev,
+					  &msm_dai_q6_slimbus_1_rx_dai);
+		break;
+	case SLIMBUS_1_TX:
+		rc = snd_soc_register_dai(&pdev->dev,
+					  &msm_dai_q6_slimbus_1_tx_dai);
+		break;
+	default:
+		rc = -ENODEV;
+		break;
+	}
+
+	return rc;
+}
+
+static int msm_dai_q6_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_dai(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_dai_q6_dev_dt_match[] = {
+	{ .compatible = "qcom,msm-dai-q6-dev", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, msm_dai_q6_dev_dt_match);
+
+static struct platform_driver msm_dai_q6_dev = {
+	.probe  = msm_dai_q6_dev_probe,
+	.remove = msm_dai_q6_dev_remove,
+	.driver = {
+		.name = "msm-dai-q6-dev",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_q6_dev_dt_match,
+	},
+};
+
+static int msm_dai_q6_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	pr_debug("%s: dev name %s, id:%d\n", __func__,
+		 dev_name(&pdev->dev), pdev->id);
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc) {
+		dev_err(&pdev->dev, "%s: failed to add child nodes, rc=%d\n",
+			__func__, rc);
+	} else
+		dev_dbg(&pdev->dev, "%s: added child nodes\n", __func__);
+
+	return rc;
+}
+
+static int msm_dai_q6_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id msm_dai_q6_dt_match[] = {
+	{ .compatible = "qcom,msm-dai-q6", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, msm_dai_q6_dt_match);
+
+static struct platform_driver msm_dai_q6 = {
+	.probe  = msm_dai_q6_probe,
+	.remove = msm_dai_q6_remove,
+	.driver = {
+		.name = "msm-dai-q6",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_q6_dt_match,
+	},
+};
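+
+/*
+ * Illustrative device tree layout (an editorial assumption; the child
+ * node name and the dev-id value are examples, not taken from this
+ * patch): the parent "qcom,msm-dai-q6" node is populated above via
+ * of_platform_populate(), and each "qcom,msm-dai-q6-dev" child picks
+ * its AFE port through qcom,msm-dai-q6-dev-id (the <16384> value below
+ * is assumed to be the SLIMBUS_0_RX port id):
+ *
+ *	qcom,msm-dai-q6 {
+ *		compatible = "qcom,msm-dai-q6";
+ *
+ *		qcom,msm-dai-q6-sb-0-rx {
+ *			compatible = "qcom,msm-dai-q6-dev";
+ *			qcom,msm-dai-q6-dev-id = <16384>;
+ *		};
+ *	};
+ */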
 
 static int __init msm_dai_q6_init(void)
 {
@@ -522,10 +1139,27 @@
 		goto fail;
 
 	rc = platform_driver_register(&msm_auxpcm_resource);
-
 	if (rc) {
 		pr_err("%s: fail to register cpu dai driver\n", __func__);
 		platform_driver_unregister(&msm_auxpcm_dev);
+		goto fail;
+	}
+
+	rc = platform_driver_register(&msm_dai_q6);
+	if (rc) {
+		pr_err("%s: failed to register q6 dai driver\n", __func__);
+		platform_driver_unregister(&msm_auxpcm_dev);
+		platform_driver_unregister(&msm_auxpcm_resource);
+		goto fail;
+	}
+
+	rc = platform_driver_register(&msm_dai_q6_dev);
+	if (rc) {
+		pr_err("%s: failed to register q6 dai dev driver\n", __func__);
+		platform_driver_unregister(&msm_dai_q6);
+		platform_driver_unregister(&msm_auxpcm_dev);
+		platform_driver_unregister(&msm_auxpcm_resource);
+		goto fail;
 	}
 fail:
 	return rc;
@@ -534,6 +1168,8 @@
 
 static void __exit msm_dai_q6_exit(void)
 {
+	platform_driver_unregister(&msm_dai_q6_dev);
+	platform_driver_unregister(&msm_dai_q6);
 	platform_driver_unregister(&msm_auxpcm_dev);
 	platform_driver_unregister(&msm_auxpcm_resource);
 }
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
index 1ac872d..047e0f0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-lpa-v2.c
@@ -35,6 +35,7 @@
 
 #include "msm-pcm-q6-v2.h"
 #include "msm-pcm-routing-v2.h"
+#include "audio_ocmem.h"
 
 static struct audio_locks the_locks;
 
@@ -223,6 +224,7 @@
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 		prtd->pcm_irq_pos = 0;
+		audio_ocmem_process_req(AUDIO, true);
 	case SNDRV_PCM_TRIGGER_RESUME:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
 		pr_debug("SNDRV_PCM_TRIGGER_START\n");
@@ -231,6 +233,7 @@
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		audio_ocmem_process_req(AUDIO, false);
 		atomic_set(&prtd->start, 0);
 		if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
 			break;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 67ee8e4..fbbb3a5 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -217,11 +217,12 @@
 	fe_dai_map[fedai_id][session_type] = dspst_id;
 	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
 		if (!is_be_dai_extproc(i) &&
-		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
-		   (msm_bedais[i].active) &&
-		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+		    (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		    (msm_bedais[i].active) &&
+		    (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
 			mode = afe_get_port_type(msm_bedais[i].port_id);
-			/*adm_connect_afe_port needs to be called*/
+			adm_connect_afe_port(mode, dspst_id,
+					     msm_bedais[i].port_id);
 			break;
 		}
 	}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
index 630405a..492569b 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -30,6 +30,7 @@
 #include "msm-pcm-q6-v2.h"
 #include "msm-pcm-routing-v2.h"
 #include "q6voice.h"
+#include "audio_ocmem.h"
 
 #define VOIP_MAX_Q_LEN 10
 #define VOIP_MAX_VOC_PKT_SIZE 640
@@ -452,6 +453,8 @@
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
 		pr_debug("%s: Trigger start\n", __func__);
+		if ((!prtd->capture_start) && (!prtd->playback_start))
+			voice_ocmem_process_req(VOICE, true);
 		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
 			prtd->capture_start = 1;
 		else
@@ -459,6 +462,8 @@
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		if (prtd->capture_start && prtd->playback_start)
+			voice_ocmem_process_req(VOICE, false);
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 			prtd->playback_start = 0;
 		else
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index aed6273..e5837b25 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -16,7 +16,7 @@
 #include <linux/jiffies.h>
 #include <linux/uaccess.h>
 #include <linux/atomic.h>
-
+#include <linux/wait.h>
 
 #include <mach/qdsp6v2/audio_acdb.h>
 #include <mach/qdsp6v2/rtac.h>
@@ -25,7 +25,7 @@
 #include <mach/qdsp6v2/apr.h>
 #include <sound/q6adm-v2.h>
 #include <sound/q6audio-v2.h>
-
+#include <sound/q6afe-v2.h>
 
 #define TIMEOUT_MS 1000
 
@@ -78,7 +78,7 @@
 			return 0;
 		}
 		if (data->opcode == APR_BASIC_RSP_RESULT) {
-			pr_debug("APR_BASIC_RSP_RESULT\n");
+			pr_debug("APR_BASIC_RSP_RESULT id %x\n", payload[0]);
 			switch (payload[0]) {
 			case ADM_CMD_SET_PP_PARAMS_V5:
 				if (rtac_make_adm_callback(
@@ -142,6 +142,76 @@
 	pr_debug("%s\n", __func__);
 }
 
+int adm_connect_afe_port(int mode, int session_id, int port_id)
+{
+	struct adm_cmd_connect_afe_port_v5	cmd;
+	int ret = 0;
+	int index;
+
+	pr_debug("%s: port %d session id:%d mode:%d\n", __func__,
+				port_id, session_id, mode);
+
+	port_id = afe_convert_virtual_to_portid(port_id);
+
+	if (afe_validate_port(port_id) < 0) {
+		pr_err("%s: port id[%d] is invalid\n", __func__, port_id);
+		return -ENODEV;
+	}
+	if (this_adm.apr == NULL) {
+		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+						0xFFFFFFFF, &this_adm);
+		if (this_adm.apr == NULL) {
+			pr_err("%s: Unable to register ADM\n", __func__);
+			ret = -ENODEV;
+			return ret;
+		}
+		rtac_set_adm_handle(this_adm.apr);
+	}
+	index = afe_get_port_index(port_id);
+	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
+
+	cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	cmd.hdr.pkt_size = sizeof(cmd);
+	cmd.hdr.src_svc = APR_SVC_ADM;
+	cmd.hdr.src_domain = APR_DOMAIN_APPS;
+	cmd.hdr.src_port = port_id;
+	cmd.hdr.dest_svc = APR_SVC_ADM;
+	cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+	cmd.hdr.dest_port = port_id;
+	cmd.hdr.token = port_id;
+	cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT_V5;
+
+	cmd.mode = mode;
+	cmd.session_id = session_id;
+	cmd.afe_port_id = port_id;
+
+	atomic_set(&this_adm.copp_stat[index], 0);
+	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
+	if (ret < 0) {
+		pr_err("%s: failed to send ADM connect AFE cmd for port %d\n",
+		       __func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	/* Wait for the callback with copp id */
+	ret = wait_event_timeout(this_adm.wait[index],
+		atomic_read(&this_adm.copp_stat[index]),
+		msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: ADM connect AFE timed out for port %d\n",
+		       __func__, port_id);
+		ret = -EINVAL;
+		goto fail_cmd;
+	}
+	atomic_inc(&this_adm.copp_cnt[index]);
+	return 0;
+
+fail_cmd:
+	return ret;
+}
+
 int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
 {
 	struct adm_cmd_device_open_v5	open;
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 4875a69..756cb18 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -144,6 +144,9 @@
 	case HDMI_RX:
 	case SLIMBUS_0_RX:
 	case SLIMBUS_1_RX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_3_RX:
+	case SLIMBUS_4_RX:
 	case INT_BT_SCO_RX:
 	case INT_BT_A2DP_RX:
 	case INT_FM_RX:
@@ -160,6 +163,9 @@
 	case VOICE_RECORD_TX:
 	case SLIMBUS_0_TX:
 	case SLIMBUS_1_TX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_TX:
+	case SLIMBUS_4_TX:
 	case INT_FM_TX:
 	case VOICE_RECORD_RX:
 	case INT_BT_SCO_TX:
@@ -168,6 +174,7 @@
 		break;
 
 	default:
+		WARN_ON(1);
 		pr_err("%s: invalid port id %d\n", __func__, port_id);
 		ret = -EINVAL;
 	}
@@ -255,7 +262,6 @@
 		ret = -EINVAL;
 		return ret;
 	}
-	pr_err("%s: %d %d\n", __func__, port_id, rate);
 	index = q6audio_get_port_index(port_id);
 	if (q6audio_validate_port(port_id) < 0)
 		return -EINVAL;
@@ -279,11 +285,11 @@
 
 	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
 				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
-	config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
+	config.hdr.pkt_size = sizeof(config);
 	config.hdr.src_port = 0;
 	config.hdr.dest_port = 0;
-
 	config.hdr.token = index;
+
 	switch (port_id) {
 	case PRIMARY_I2S_RX:
 	case PRIMARY_I2S_TX:
@@ -320,15 +326,15 @@
 		goto fail_cmd;
 	}
 	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM_V2;
-	config.param.port_id = port_id;
-	config.param.payload_size = (afe_sizeof_cfg_cmd(port_id) +
-				sizeof(struct afe_port_param_data_v2));
+	config.param.port_id = q6audio_get_port_id(port_id);
+	config.param.payload_size = sizeof(config) - sizeof(struct apr_hdr) -
+				    sizeof(config.param);
 	config.param.payload_address_lsw = 0x00;
 	config.param.payload_address_msw = 0x00;
 	config.param.mem_map_handle = 0x00;
 	config.pdata.module_id = AFE_MODULE_AUDIO_DEV_INTERFACE;
 	config.pdata.param_id = cfg_type;
-	config.pdata.param_size =  afe_sizeof_cfg_cmd(port_id);
+	config.pdata.param_size = sizeof(config.port);
 
 	config.port = *afe_config;
 
@@ -348,9 +354,11 @@
 	start.hdr.pkt_size = sizeof(start);
 	start.hdr.src_port = 0;
 	start.hdr.dest_port = 0;
-	start.hdr.token = 0;
+	start.hdr.token = index;
 	start.hdr.opcode = AFE_PORT_CMD_DEVICE_START;
-	start.port_id = port_id;
+	start.port_id = q6audio_get_port_id(port_id);
+	pr_debug("%s: cmd device start opcode[0x%x] port id[0x%x]\n",
+		 __func__, start.hdr.opcode, start.port_id);
 
 	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
 
@@ -367,6 +375,45 @@
 	return ret;
 }
 
+int afe_get_port_index(u16 port_id)
+{
+	switch (port_id) {
+	case PRIMARY_I2S_RX: return IDX_PRIMARY_I2S_RX;
+	case PRIMARY_I2S_TX: return IDX_PRIMARY_I2S_TX;
+	case PCM_RX: return IDX_PCM_RX;
+	case PCM_TX: return IDX_PCM_TX;
+	case SECONDARY_I2S_RX: return IDX_SECONDARY_I2S_RX;
+	case SECONDARY_I2S_TX: return IDX_SECONDARY_I2S_TX;
+	case MI2S_RX: return IDX_MI2S_RX;
+	case MI2S_TX: return IDX_MI2S_TX;
+	case HDMI_RX: return IDX_HDMI_RX;
+	case RSVD_2: return IDX_RSVD_2;
+	case RSVD_3: return IDX_RSVD_3;
+	case DIGI_MIC_TX: return IDX_DIGI_MIC_TX;
+	case VOICE_RECORD_RX: return IDX_VOICE_RECORD_RX;
+	case VOICE_RECORD_TX: return IDX_VOICE_RECORD_TX;
+	case VOICE_PLAYBACK_TX: return IDX_VOICE_PLAYBACK_TX;
+	case SLIMBUS_0_RX: return IDX_SLIMBUS_0_RX;
+	case SLIMBUS_0_TX: return IDX_SLIMBUS_0_TX;
+	case SLIMBUS_1_RX: return IDX_SLIMBUS_1_RX;
+	case SLIMBUS_1_TX: return IDX_SLIMBUS_1_TX;
+	case SLIMBUS_2_RX: return IDX_SLIMBUS_2_RX;
+	case SLIMBUS_2_TX: return IDX_SLIMBUS_2_TX;
+	case SLIMBUS_3_RX: return IDX_SLIMBUS_3_RX;
+	case INT_BT_SCO_RX: return IDX_INT_BT_SCO_RX;
+	case INT_BT_SCO_TX: return IDX_INT_BT_SCO_TX;
+	case INT_BT_A2DP_RX: return IDX_INT_BT_A2DP_RX;
+	case INT_FM_RX: return IDX_INT_FM_RX;
+	case INT_FM_TX: return IDX_INT_FM_TX;
+	case RT_PROXY_PORT_001_RX: return IDX_RT_PROXY_PORT_001_RX;
+	case RT_PROXY_PORT_001_TX: return IDX_RT_PROXY_PORT_001_TX;
+	case SLIMBUS_4_RX: return IDX_SLIMBUS_4_RX;
+	case SLIMBUS_4_TX: return IDX_SLIMBUS_4_TX;
+
+	default: return -EINVAL;
+	}
+}
+
 int afe_open(u16 port_id,
 		union afe_port_config *afe_config, int rate)
 {
@@ -1469,6 +1516,75 @@
 	return ret;
 }
 
+int afe_validate_port(u16 port_id)
+{
+	int ret;
+
+	switch (port_id) {
+	case PRIMARY_I2S_RX:
+	case PRIMARY_I2S_TX:
+	case PCM_RX:
+	case PCM_TX:
+	case SECONDARY_I2S_RX:
+	case SECONDARY_I2S_TX:
+	case MI2S_RX:
+	case MI2S_TX:
+	case HDMI_RX:
+	case RSVD_2:
+	case RSVD_3:
+	case DIGI_MIC_TX:
+	case VOICE_RECORD_RX:
+	case VOICE_RECORD_TX:
+	case VOICE_PLAYBACK_TX:
+	case SLIMBUS_0_RX:
+	case SLIMBUS_0_TX:
+	case SLIMBUS_1_RX:
+	case SLIMBUS_1_TX:
+	case SLIMBUS_2_RX:
+	case SLIMBUS_2_TX:
+	case SLIMBUS_3_RX:
+	case INT_BT_SCO_RX:
+	case INT_BT_SCO_TX:
+	case INT_BT_A2DP_RX:
+	case INT_FM_RX:
+	case INT_FM_TX:
+	case RT_PROXY_PORT_001_RX:
+	case RT_PROXY_PORT_001_TX:
+	case SLIMBUS_4_RX:
+	case SLIMBUS_4_TX:
+		ret = 0;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+int afe_convert_virtual_to_portid(u16 port_id)
+{
+	int ret;
+
+	/*
+	 * If port_id is a virtual RT proxy DAI id, convert it to the
+	 * corresponding physical AFE port id; if it is already a valid
+	 * physical id, return it unchanged.
+	 */
+	if (afe_validate_port(port_id) < 0) {
+		if (port_id == RT_PROXY_DAI_001_RX ||
+		    port_id == RT_PROXY_DAI_001_TX ||
+		    port_id == RT_PROXY_DAI_002_RX ||
+		    port_id == RT_PROXY_DAI_002_TX)
+			ret = VIRTUAL_ID_TO_PORTID(port_id);
+		else
+			ret = -EINVAL;
+	} else
+		ret = port_id;
+
+	return ret;
+}
+
 int afe_port_stop_nowait(int port_id)
 {
 	struct afe_port_cmd_device_stop stop;
diff --git a/sound/soc/msm/qdsp6v2/q6core.c b/sound/soc/msm/qdsp6v2/q6core.c
new file mode 100644
index 0000000..2c31d39
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6core.c
@@ -0,0 +1,211 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <mach/msm_smd.h>
+#include <mach/qdsp6v2/apr.h>
+#include "q6core.h"
+#include <mach/ocmem.h>
+
+#define TIMEOUT_MS 1000
+
+struct q6core_str {
+	struct apr_svc *core_handle_q;
+	wait_queue_head_t bus_bw_req_wait;
+	u32 bus_bw_resp_received;
+	struct avcs_cmd_rsp_get_low_power_segments_info_t *lp_ocm_payload;
+};
+
+struct q6core_str q6core_lcl;
+
+static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
+{
+	uint32_t *payload1;
+	uint32_t nseg;
+	int i, j;
+
+	pr_info("core msg: payload len = %u, apr resp opcode = 0x%X\n",
+		data->payload_size, data->opcode);
+
+	switch (data->opcode) {
+
+	case APR_BASIC_RSP_RESULT:{
+
+		if (data->payload_size == 0) {
+			pr_err("%s: APR_BASIC_RSP_RESULT with no payload\n",
+			       __func__);
+			return 0;
+		}
+
+		payload1 = data->payload;
+
+		switch (payload1[0]) {
+
+		case AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO:
+			pr_info("%s: Cmd = AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO status[0x%x]\n",
+				__func__, payload1[1]);
+			break;
+		default:
+			pr_err("Invalid cmd rsp[0x%x][0x%x]\n",
+					payload1[0], payload1[1]);
+			break;
+		}
+		break;
+	}
+
+	case AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO:
+		payload1 = data->payload;
+		pr_info("%s: cmd = AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO num_segments = 0x%x\n",
+					__func__, payload1[0]);
+		nseg = payload1[0];
+		/* do not overrun the fixed mem_segment[] array */
+		if (nseg > OCMEM_MAX_CHUNKS)
+			nseg = OCMEM_MAX_CHUNKS;
+		q6core_lcl.lp_ocm_payload->num_segments = nseg;
+		q6core_lcl.lp_ocm_payload->bandwidth = payload1[1];
+		for (i = 0, j = 2; i < nseg; i++) {
+			q6core_lcl.lp_ocm_payload->mem_segment[i].type =
+					(payload1[j] & 0xffff);
+			q6core_lcl.lp_ocm_payload->mem_segment[i].category =
+					((payload1[j++] >> 16) & 0xffff);
+			q6core_lcl.lp_ocm_payload->mem_segment[i].size =
+					payload1[j++];
+			q6core_lcl.lp_ocm_payload->
+				mem_segment[i].start_address_lsw =
+				payload1[j++];
+			q6core_lcl.lp_ocm_payload->
+				mem_segment[i].start_address_msw =
+				payload1[j++];
+		}
+
+		q6core_lcl.bus_bw_resp_received = 1;
+		wake_up(&q6core_lcl.bus_bw_req_wait);
+		break;
+
+	case RESET_EVENTS:{
+		pr_debug("%s: Reset event received in Core service\n",
+			 __func__);
+		apr_reset(q6core_lcl.core_handle_q);
+		q6core_lcl.core_handle_q = NULL;
+		break;
+	}
+
+	default:
+		pr_err("%s: unhandled message from ADSP core svc: 0x%x\n",
+		       __func__, data->opcode);
+		break;
+	}
+
+	return 0;
+}
+
+
+void ocm_core_open(void)
+{
+	if (q6core_lcl.core_handle_q == NULL)
+		q6core_lcl.core_handle_q = apr_register("ADSP", "CORE",
+					aprv2_core_fn_q, 0xFFFFFFFF, NULL);
+	pr_debug("Open_q %p\n", q6core_lcl.core_handle_q);
+	if (q6core_lcl.core_handle_q == NULL)
+		pr_err("%s: Unable to register CORE\n", __func__);
+}
+
+int core_get_low_power_segments(
+		struct avcs_cmd_rsp_get_low_power_segments_info_t **lp_memseg)
+{
+	struct avcs_cmd_get_low_power_segments_info lp_ocm_cmd;
+	u8 *cptr = NULL;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	ocm_core_open();
+	if (q6core_lcl.core_handle_q == NULL) {
+		pr_err("%s: APR registration for CORE failed\n", __func__);
+		return -ENODEV;
+	}
+
+	cptr = kzalloc(
+		sizeof(struct avcs_cmd_rsp_get_low_power_segments_info_t),
+		GFP_KERNEL);
+	if (!cptr) {
+		pr_err("%s: Failed to allocate memory for low power segment struct\n",
+				__func__);
+		return -ENOMEM;
+	}
+	q6core_lcl.lp_ocm_payload =
+		(struct avcs_cmd_rsp_get_low_power_segments_info_t *) cptr;
+
+	lp_ocm_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+	lp_ocm_cmd.hdr.pkt_size =
+		sizeof(struct avcs_cmd_get_low_power_segments_info);
+
+	lp_ocm_cmd.hdr.src_port = 0;
+	lp_ocm_cmd.hdr.dest_port = 0;
+	lp_ocm_cmd.hdr.token = 0;
+	lp_ocm_cmd.hdr.opcode = AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO;
+
+
+	ret = apr_send_pkt(q6core_lcl.core_handle_q, (uint32_t *) &lp_ocm_cmd);
+	if (ret < 0) {
+		pr_err("%s: CORE low power segment request failed\n", __func__);
+		goto fail_cmd;
+	}
+
+	ret = wait_event_timeout(q6core_lcl.bus_bw_req_wait,
+				(q6core_lcl.bus_bw_resp_received == 1),
+				msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("%s: wait_event timeout for GET_LOW_POWER_SEGMENTS\n",
+				__func__);
+		ret = -ETIME;
+		goto fail_cmd;
+	}
+
+	*lp_memseg = q6core_lcl.lp_ocm_payload;
+	return 0;
+
+fail_cmd:
+	return ret;
+}
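+
+/*
+ * Usage sketch (editorial illustration; the real caller is expected to
+ * live in the OCMEM audio glue and is not part of this file):
+ *
+ *	struct avcs_cmd_rsp_get_low_power_segments_info_t *seg = NULL;
+ *
+ *	if (!core_get_low_power_segments(&seg))
+ *		pr_debug("nseg %u bw %u\n",
+ *			 seg->num_segments, seg->bandwidth);
+ *
+ * The returned pointer refers to a buffer owned by this module; callers
+ * must not free it.
+ */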
+
+
+static int __init core_init(void)
+{
+	init_waitqueue_head(&q6core_lcl.bus_bw_req_wait);
+	q6core_lcl.bus_bw_resp_received = 0;
+
+	q6core_lcl.core_handle_q = NULL;
+	q6core_lcl.lp_ocm_payload = kzalloc(
+		sizeof(struct avcs_cmd_rsp_get_low_power_segments_info_t),
+		GFP_KERNEL);
+
+	if (!q6core_lcl.lp_ocm_payload) {
+		pr_err("%s: Failed to allocate memory for low power segment struct\n",
+				__func__);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+module_init(core_init);
+
+static void __exit core_exit(void)
+{
+	kfree(q6core_lcl.lp_ocm_payload);
+}
+module_exit(core_exit);
+MODULE_DESCRIPTION("ADSP core driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/sound/soc/msm/qdsp6v2/q6core.h b/sound/soc/msm/qdsp6v2/q6core.h
new file mode 100644
index 0000000..5cb6098
--- /dev/null
+++ b/sound/soc/msm/qdsp6v2/q6core.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __Q6CORE_H__
+#define __Q6CORE_H__
+#include <mach/qdsp6v2/apr.h>
+#include <mach/ocmem.h>
+
+
+#define AVCS_CMD_GET_LOW_POWER_SEGMENTS_INFO              0x00012903
+
+struct avcs_cmd_get_low_power_segments_info {
+	struct apr_hdr hdr;
+} __packed;
+
+
+#define AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO           0x00012904
+
+/* @brief AVCS_CMDRSP_GET_LOW_POWER_SEGMENTS_INFO payload
+ * structure. Payload for this event comprises one instance of
+ * avcs_cmd_rsp_get_low_power_segments_info_t, followed
+ * immediately by num_segments number of instances of the
+ * avcs_mem_segment_t structure.
+ */
+
+/* Types of Low Power Memory Segments. */
+#define READ_ONLY_SEGMENT      1
+/*< Read Only memory segment. */
+#define READ_WRITE_SEGMENT     2
+/*< Read Write memory segment. */
+/* Category indicates whether this is an audio, OS or sensor segment. */
+#define AUDIO_SEGMENT          1
+/*< Audio memory segment. */
+#define OS_SEGMENT             2
+/*< QDSP6's OS memory segment. */
+
+/* @brief Payload structure for AVS low power memory segment
+ *  structure.
+ */
+struct avcs_mem_segment_t {
+	uint16_t              type;
+/*< Indicates which type of memory this segment is.
+ * Allowed values: READ_ONLY_SEGMENT or READ_WRITE_SEGMENT only.
+ */
+	uint16_t              category;
+/*< Indicates whether this is an audio or an OS segment.
+ * Allowed values: AUDIO_SEGMENT or OS_SEGMENT only.
+ */
+	uint32_t              size;
+/*< Size (in bytes) of this segment.
+ * Will be a non-zero value.
+ */
+	uint32_t              start_address_lsw;
+/*< Lower 32 bits of the 64-bit physical start address
+ * of this segment.
+ */
+	uint32_t              start_address_msw;
+/*< Upper 32 bits of the 64-bit physical start address
+ * of this segment.
+ */
+};
+
+struct avcs_cmd_rsp_get_low_power_segments_info_t {
+	uint32_t              num_segments;
+/*< Number of segments in this response.
+ * 0: there are no known sections that should be mapped
+ * from DDR to OCMEM.
+ * >0: the number of memory segments in the following list.
+ */
+
+	uint32_t              bandwidth;
+/*< Required OCMEM read/write bandwidth (in bytes per second)
+ * if OCMEM is granted.
+ * 0 if num_segments = 0
+ * >0 if num_segments > 0.
+ */
+	struct avcs_mem_segment_t mem_segment[OCMEM_MAX_CHUNKS];
+};
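+
+/*
+ * Illustrative helper (an editorial sketch, not part of the ADSP
+ * interface): shows how the response payload above is meant to be
+ * walked, here totalling the size of the audio segments. Nothing in
+ * the driver calls this.
+ */
+static inline uint32_t avcs_lp_audio_segments_size(
+	const struct avcs_cmd_rsp_get_low_power_segments_info_t *rsp)
+{
+	uint32_t i, total = 0;
+
+	for (i = 0; i < rsp->num_segments && i < OCMEM_MAX_CHUNKS; i++)
+		if (rsp->mem_segment[i].category == AUDIO_SEGMENT)
+			total += rsp->mem_segment[i].size;
+
+	return total;
+}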
+
+
+int core_get_low_power_segments(
+			struct avcs_cmd_rsp_get_low_power_segments_info_t **);
+
+#endif /* __Q6CORE_H__ */
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index d4ce733..704d63a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -148,7 +148,7 @@
 		return 0;
 
 	if (output[type].user_set) {
-		evname = __event_name(attr->type, attr->config);
+		evname = __event_name(attr->type, attr->config, NULL);
 		pr_err("Samples for '%s' event do not have %s attribute set. "
 		       "Cannot print '%s' field.\n",
 		       evname, sample_msg, output_field2str(field));
@@ -157,7 +157,7 @@
 
 	/* user did not ask for it explicitly so remove from the default list */
 	output[type].fields &= ~field;
-	evname = __event_name(attr->type, attr->config);
+	evname = __event_name(attr->type, attr->config, NULL);
 	pr_debug("Samples for '%s' event do not have %s attribute set. "
 		 "Skipping '%s' field.\n",
 		 evname, sample_msg, output_field2str(field));
@@ -305,7 +305,7 @@
 			if (event)
 				evname = event->name;
 		} else
-			evname = __event_name(attr->type, attr->config);
+			evname = __event_name(attr->type, attr->config, NULL);
 
 		printf("%s: ", evname ? evname : "[unknown]");
 	}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 5b3a0ef..be2e0c5 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -301,10 +301,10 @@
 	if (evsel->name)
 		return evsel->name;
 
-	return __event_name(type, config);
+	return __event_name(type, config, NULL);
 }
 
-const char *__event_name(int type, u64 config)
+const char *__event_name(int type, u64 config, char *pmu_name)
 {
 	static char buf[32];
 
@@ -349,7 +349,12 @@
 		return tracepoint_id_to_name(config);
 
 	default:
-		break;
+		if (pmu_name) {
+			snprintf(buf, sizeof(buf), "%s 0x%" PRIx64,
+				 pmu_name, config);
+			return buf;
+		}
+		break;
 	}
 
 	return "unknown";
@@ -630,6 +635,32 @@
 	return 0;
 }
 
+int parse_events_add_numeric_legacy(struct list_head *list, int *idx,
+			     const char *name, unsigned long config,
+			     struct list_head *head_config)
+{
+	struct perf_event_attr attr;
+	struct perf_pmu *pmu;
+	char *pmu_name = strdup(name);
+	int ret;
+
+	if (!pmu_name)
+		return -ENOMEM;
+
+	memset(&attr, 0, sizeof(attr));
+
+	pmu = perf_pmu__find(pmu_name);
+	if (!pmu) {
+		free(pmu_name);
+		return -EINVAL;
+	}
+
+	attr.type = pmu->type;
+	attr.config = config;
+
+	if (head_config &&
+	    config_attr(&attr, head_config, 1)) {
+		free(pmu_name);
+		return -EINVAL;
+	}
+
+	/* __event_name() copies pmu_name into its static buffer */
+	ret = add_event(list, idx, &attr,
+			(char *) __event_name(pmu->type, config, pmu_name));
+	free(pmu_name);
+	return ret;
+}
+
 int parse_events_add_numeric(struct list_head *list, int *idx,
 			     unsigned long type, unsigned long config,
 			     struct list_head *head_config)
@@ -645,7 +676,7 @@
 		return -EINVAL;
 
 	return add_event(list, idx, &attr,
-			 (char *) __event_name(type, config));
+			 (char *) __event_name(type, config, NULL));
 }
 
 int parse_events_add_pmu(struct list_head *list, int *idx,
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index ca069f8..4da2f3c 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -23,7 +23,7 @@
 
 const char *event_type(int type);
 const char *event_name(struct perf_evsel *event);
-extern const char *__event_name(int type, u64 config);
+extern const char *__event_name(int type, u64 config, char *name);
 
 extern int parse_events_option(const struct option *opt, const char *str,
 			       int unset);
@@ -67,6 +67,10 @@
 int parse_events_add_raw(struct perf_evlist *evlist, unsigned long config,
 			 unsigned long config1, unsigned long config2,
 			 char *mod);
+int parse_events_add_numeric_legacy(struct list_head *list, int *idx,
+			     const char *name, unsigned long config,
+			     struct list_head *head_config);
+
 int parse_events_add_numeric(struct list_head *list, int *idx,
 			     unsigned long type, unsigned long config,
 			     struct list_head *head_config);
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 07b292d..581cd94 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -156,13 +156,13 @@
 event_legacy_shared_raw:
 PE_SH_RAW
 {
-	ABORT_ON(parse_events_add_numeric(list_event, idx, 6, $1, NULL));
+	ABORT_ON(parse_events_add_numeric_legacy(list_event, idx, "msm-l2", $1, NULL));
 }
 
 event_legacy_fabric_raw:
 PE_FAB_RAW
 {
-	ABORT_ON(parse_events_add_numeric(list_event, idx, 7, $1, NULL));
+	ABORT_ON(parse_events_add_numeric_legacy(list_event, idx, "msm-busmon", $1, NULL));
 }
 
 event_config: